From c78f56bd4f94c5e558c8742a2a82e916a2c19b86 Mon Sep 17 00:00:00 2001 From: "ci.datadog-api-spec" Date: Tue, 17 Mar 2026 20:29:47 +0000 Subject: [PATCH] Regenerate client from commit bd40a10 of spec repo --- .../v1/service-level-objectives/examples.json | 12 ++-- .../request.CreateSLO_707861409.json | 43 +++++++++++++ .../v2/observability-pipelines/examples.json | 21 ++++-- data/api/v1/CodeExamples.json | 5 ++ data/api/v1/full_spec.yaml | 64 +++++++++++++++---- data/api/v2/full_spec.yaml | 5 ++ 6 files changed, 125 insertions(+), 25 deletions(-) create mode 100644 content/en/api/v1/service-level-objectives/request.CreateSLO_707861409.json diff --git a/content/en/api/v1/service-level-objectives/examples.json b/content/en/api/v1/service-level-objectives/examples.json index 441d5410178..2804ff4ab23 100644 --- a/content/en/api/v1/service-level-objectives/examples.json +++ b/content/en/api/v1/service-level-objectives/examples.json @@ -69,7 +69,7 @@ } } }, - "html": "
\n
\n
\n
\n

data

\n
\n

[object]

\n

An array of service level objective objects.

\n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when then length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

object

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula, the total events formula,\nand the underlying queries.

\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

object

\n

The metadata object containing additional information about the list of SLOs.

\n
\n
\n
\n
\n
\n

page

\n
\n

object

\n

The object containing information about the pages of the list of SLOs.

\n
\n
\n
\n
\n
\n

total_count

\n
\n

int64

\n

The total number of resources that could be retrieved ignoring the parameters and filters in the request.

\n
\n \n
\n
\n
\n
\n
\n

total_filtered_count

\n
\n

int64

\n

The total number of resources that match the parameters and filters in the request. This attribute can be used by a client to determine the total number of pages.

\n
\n \n
\n
\n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data

\n
\n

[object]

\n

An array of service level objective objects.

\n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

 <oneOf>

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula,\nthe bad or total events formula, and the underlying queries.\nExactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

The total events formula. Bad events queries can be defined using the bad_events_formula field as an alternative. Exactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

bad_events_formula [required]

\n
\n

object

\n

The bad events formula (recommended). Total events queries can be defined using the total_events_formula field as an alternative. Exactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

object

\n

The metadata object containing additional information about the list of SLOs.

\n
\n
\n
\n
\n
\n

page

\n
\n

object

\n

The object containing information about the pages of the list of SLOs.

\n
\n
\n
\n
\n
\n

total_count

\n
\n

int64

\n

The total number of resources that could be retrieved ignoring the parameters and filters in the request.

\n
\n \n
\n
\n
\n
\n
\n

total_filtered_count

\n
\n

int64

\n

The total number of resources that match the parameters and filters in the request. This attribute can be used by a client to determine the total number of pages.

\n
\n \n
\n
\n
\n
\n
\n
" }, "400": { "json": { @@ -180,7 +180,7 @@ } } }, - "html": "
\n
\n
\n
\n

data

\n
\n

[object]

\n

An array of service level objective objects.

\n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when then length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

object

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula, the total events formula,\nand the underlying queries.

\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

object

\n

The metadata object containing additional information about the list of SLOs.

\n
\n
\n
\n
\n
\n

page

\n
\n

object

\n

The object containing information about the pages of the list of SLOs.

\n
\n
\n
\n
\n
\n

total_count

\n
\n

int64

\n

The total number of resources that could be retrieved ignoring the parameters and filters in the request.

\n
\n \n
\n
\n
\n
\n
\n

total_filtered_count

\n
\n

int64

\n

The total number of resources that match the parameters and filters in the request. This attribute can be used by a client to determine the total number of pages.

\n
\n \n
\n
\n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data

\n
\n

[object]

\n

An array of service level objective objects.

\n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

 <oneOf>

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula,\nthe bad or total events formula, and the underlying queries.\nExactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

The total events formula. Bad events queries can be defined using the bad_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

bad_events_formula [required]

\n
\n

object

\n

The bad events formula (recommended). Total events queries can be defined using the total_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

object

\n

The metadata object containing additional information about the list of SLOs.

\n
\n
\n
\n
\n
\n

page

\n
\n

object

\n

The object containing information about the pages of the list of SLOs.

\n
\n
\n
\n
\n
\n

total_count

\n
\n

int64

\n

The total number of resources that could be retrieved ignoring the parameters and filters in the request.

\n
\n \n
\n
\n
\n
\n
\n

total_filtered_count

\n
\n

int64

\n

The total number of resources that match the parameters and filters in the request. This attribute can be used by a client to determine the total number of pages.

\n
\n \n
\n
\n
\n
\n
\n
" }, "400": { "json": { @@ -285,7 +285,7 @@ "type": "metric", "warning_threshold": 99.95 }, - "html": "
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor IDs that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

object

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula, the total events formula,\nand the underlying queries.

\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
" + "html": "
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor IDs that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

 <oneOf>

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula,\nthe bad or total events formula, and the underlying queries.\nExactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

The total events formula. Bad events queries can be defined using the bad_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

bad_events_formula [required]

\n
\n

object

\n

The bad events formula (recommended). Total events queries can be defined using the total_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
" } }, "DeleteSLOTimeframeInBulk": { @@ -717,7 +717,7 @@ }, "errors": [] }, - "html": "
\n
\n
\n
\n

data

\n
\n

object

\n

A service level objective object includes a service level indicator, thresholds\nfor one or more timeframes, and metadata (name, description, tags, etc.).

\n
\n
\n
\n
\n
\n

configured_alert_ids

\n
\n

[integer]

\n

A list of SLO monitor IDs that reference this SLO. This field is returned only when the with_configured_alert_ids parameter is true in the query.

\n
\n \n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 20) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

The metric query used to define a count-based SLO as the ratio of good events to total events.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is currently used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

object

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula, the total events formula,\nand the underlying queries.

\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
" + "html": "
\n
\n
\n
\n

data

\n
\n

object

\n

A service level objective object includes a service level indicator, thresholds\nfor one or more timeframes, and metadata (name, description, tags, etc.).

\n
\n
\n
\n
\n
\n

configured_alert_ids

\n
\n

[integer]

\n

A list of SLO monitor IDs that reference this SLO. This field is returned only when the with_configured_alert_ids parameter is true in the query.

\n
\n \n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 20) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

The metric query used to define a count-based SLO as the ratio of good events to total events.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is currently used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

 <oneOf>

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula,\nthe bad or total events formula, and the underlying queries.\nExactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

The total events formula. Bad events queries can be defined using the bad_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

bad_events_formula [required]

\n
\n

object

\n

The bad events formula (recommended). Total events queries can be defined using the total_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
" }, "403": { "json": { @@ -820,7 +820,7 @@ } } }, - "html": "
\n
\n
\n
\n

data

\n
\n

[object]

\n

An array of service level objective objects.

\n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when the length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

object

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula, the total events formula,\nand the underlying queries.

\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

object

\n

The metadata object containing additional information about the list of SLOs.

\n
\n
\n
\n
\n
\n

page

\n
\n

object

\n

The object containing information about the pages of the list of SLOs.

\n
\n
\n
\n
\n
\n

total_count

\n
\n

int64

\n

The total number of resources that could be retrieved ignoring the parameters and filters in the request.

\n
\n \n
\n
\n
\n
\n
\n

total_filtered_count

\n
\n

int64

\n

The total number of resources that match the parameters and filters in the request. This attribute can be used by a client to determine the total number of pages.

\n
\n \n
\n
\n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data

\n
\n

[object]

\n

An array of service level objective objects.

\n
\n
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when then length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

 <oneOf>

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula,\nthe bad or total events formula, and the underlying queries.\nExactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

The total events formula. Bad events queries can be defined using the bad_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

bad_events_formula [required]

\n
\n

object

\n

The bad events formula (recommended). Total events queries can be defined using the total_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

errors

\n
\n

[string]

\n

An array of error messages. Each endpoint documents how/whether this field is\nused.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

object

\n

The metadata object containing additional information about the list of SLOs.

\n
\n
\n
\n
\n
\n

page

\n
\n

object

\n

The object containing information about the pages of the list of SLOs.

\n
\n
\n
\n
\n
\n

total_count

\n
\n

int64

\n

The total number of resources that could be retrieved ignoring the parameters and filters in the request.

\n
\n \n
\n
\n
\n
\n
\n

total_filtered_count

\n
\n

int64

\n

The total number of resources that match the parameters and filters in the request. This attribute can be used by a client to determine the total number of pages.

\n
\n \n
\n
\n
\n
\n
\n
" }, "400": { "json": { @@ -934,7 +934,7 @@ "type": "metric", "warning_threshold": 99.95 }, - "html": "
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when then length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

object

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula, the total events formula,\nand the underlying queries.

\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
" + "html": "
\n
\n
\n
\n

created_at

\n
\n

int64

\n

Creation timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

creator

\n
\n

object

\n

Object describing the creator of the shared element.

\n
\n
\n
\n
\n
\n

email

\n
\n

string

\n

Email of the creator.

\n
\n \n
\n
\n
\n
\n
\n

handle

\n
\n

string

\n

Handle of the creator.

\n
\n \n
\n
\n
\n
\n
\n

name

\n
\n

string

\n

Name of the creator.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

A user-defined description of the service level objective.

Always included in service level objective responses (but may be null).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

groups

\n
\n

[string]

\n

A list of (up to 100) monitor groups that narrow the scope of a monitor service level objective.

Included in service level objective responses if it is not empty. Optional in\ncreate/update requests for monitor service level objectives, but may only be\nused when then length of the monitor_ids field is one.

\n
\n \n
\n
\n
\n
\n
\n

id

\n
\n

string

\n

A unique identifier for the service level objective object.

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

modified_at

\n
\n

int64

\n

Modification timestamp (UNIX time in seconds)

Always included in service level objective responses.

\n
\n \n
\n
\n
\n
\n
\n

monitor_ids

\n
\n

[integer]

\n

A list of monitor ids that defines the scope of a monitor service level\nobjective. Required if type is monitor.

\n
\n \n
\n
\n
\n
\n
\n

monitor_tags

\n
\n

[string]

\n

The union of monitor tags for all monitors referenced by the monitor_ids\nfield.\nAlways included in service level objective responses for monitor-based service level\nobjectives (but may be empty). Ignored in create/update requests. Does not\naffect which monitors are included in the service level objective (that is\ndetermined entirely by the monitor_ids field).

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the service level objective object.

\n
\n \n
\n
\n
\n
\n
\n

query

\n
\n

object

\n

A count-based (metric) SLO query. This field is superseded by sli_specification but is retained for backwards compatibility. Note that Datadog only allows the sum by aggregator\nto be used because this will sum up all request counts instead of averaging them, or taking the max or\nmin of all of those requests.

\n
\n
\n
\n
\n
\n

denominator [required]

\n
\n

string

\n

A Datadog metric query for total (valid) events.

\n
\n \n
\n
\n
\n
\n
\n

numerator [required]

\n
\n

string

\n

A Datadog metric query for good events.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sli_specification

\n
\n

 <oneOf>

\n

A generic SLI specification. This is used for time-slice and count-based (metric) SLOs only.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A time-slice SLI specification.

\n
\n
\n
\n
\n
\n

time_slice [required]

\n
\n

object

\n

The time-slice condition, composed of 3 parts: 1. the metric timeseries query, 2. the comparator,\nand 3. the threshold. Optionally, a fourth part, the query interval, can be provided.

\n
\n
\n
\n
\n
\n

comparator [required]

\n
\n

enum

\n

The comparator used to compare the SLI value to the threshold. \nAllowed enum values: >,>=,<,<=

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

object

\n

The queries and formula used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

formulas [required]

\n
\n

[object]

\n

A list that contains exactly one formula, as only a single formula may be used in a time-slice SLO.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n

A list of queries that are used to calculate the SLI value.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

query_interval_seconds

\n
\n

enum

\n

The interval used when querying data, which defines the size of a time slice.\nTwo values are allowed: 60 (1 minute) and 300 (5 minutes).\nIf not provided, the value defaults to 300 (5 minutes). \nAllowed enum values: 60,300

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

double

\n

The threshold value to which each SLI value will be compared.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

A metric SLI specification.

\n
\n
\n
\n
\n
\n

count [required]

\n
\n

 <oneOf>

\n

A count-based (metric) SLI specification, composed of three parts: the good events formula,\nthe bad or total events formula, and the underlying queries.\nExactly one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

total_events_formula [required]

\n
\n

object

\n

The total events formula. Bad events queries can be defined using the bad_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n
\n
\n
\n
\n
\n
\n

bad_events_formula [required]

\n
\n

object

\n

The bad events formula (recommended). Total events queries can be defined using the total_events_formula field as an alternative. Only one of total_events_formula or bad_events_formula must be provided.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

good_events_formula [required]

\n
\n

object

\n

A formula that specifies how to combine the results of multiple queries.

\n
\n
\n
\n
\n
\n

formula [required]

\n
\n

string

\n

The formula string, which is an expression involving named queries.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

queries [required]

\n
\n

[ <oneOf>]

\n
\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

A formula and functions metrics query.

\n
\n
\n
\n
\n
\n

aggregator

\n
\n

enum

\n

The aggregation methods available for metrics queries. \nAllowed enum values: avg,min,max,sum,last,area,l2norm,percentile

\n
\n \n
\n
\n
\n
\n
\n

cross_org_uuids

\n
\n

[string]

\n

The source organization UUID for cross organization queries. Feature in Private Beta.

\n
\n \n
\n
\n
\n
\n
\n

data_source [required]

\n
\n

enum

\n

Data source for metrics queries. \nAllowed enum values: metrics

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the query for use in formulas.

\n
\n \n
\n
\n
\n
\n
\n

query [required]

\n
\n

string

\n

Metrics query definition.

\n
\n \n
\n
\n
\n
\n
\n

semantic_mode

\n
\n

enum

\n

Semantic mode for metrics queries. This determines how metrics from different sources are combined or displayed. \nAllowed enum values: combined,native

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags

\n
\n

[string]

\n

A list of tags associated with this service level objective.\nAlways included in service level objective responses (but may be empty).\nOptional in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n

target_threshold

\n
\n

double

\n

The target threshold such that when the service level indicator is above this\nthreshold over the given timeframe, the objective is being met.

\n
\n \n
\n
\n
\n
\n
\n

thresholds [required]

\n
\n

[object]

\n

The thresholds (timeframes and associated targets) for this service level\nobjective object.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

double

\n

The target value for the service level indicator within the corresponding\ntimeframe.

\n
\n \n
\n
\n
\n
\n
\n

target_display

\n
\n

string

\n

A string representation of the target that indicates its precision.\nIt uses trailing zeros to show significant decimal places (for example 98.00).

Always included in service level objective responses. Ignored in\ncreate/update requests.

\n
\n \n
\n
\n
\n
\n
\n

timeframe [required]

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

warning

\n
\n

double

\n

The warning value for the service level objective.

\n
\n \n
\n
\n
\n
\n
\n

warning_display

\n
\n

string

\n

A string representation of the warning target (see the description of\nthe target_display field for details).

Included in service level objective responses if a warning target exists.\nIgnored in create/update requests.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

timeframe

\n
\n

enum

\n

The SLO time window options. Note that "custom" is not a valid option for creating\nor updating SLOs. It is only used when querying SLO history over custom timeframes. \nAllowed enum values: 7d,30d,90d,custom

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The type of the service level objective. \nAllowed enum values: metric,monitor,time_slice

\n
\n \n
\n
\n
\n
\n
\n

warning_threshold

\n
\n

double

\n

The optional warning threshold such that when the service level indicator is\nbelow this value for the given threshold, but above the target threshold, the\nobjective appears in a "warning" state. This value must be greater than the target\nthreshold.

\n
\n \n
\n
" } }, "GetSLOCorrections": { diff --git a/content/en/api/v1/service-level-objectives/request.CreateSLO_707861409.json b/content/en/api/v1/service-level-objectives/request.CreateSLO_707861409.json new file mode 100644 index 00000000000..a45174228b9 --- /dev/null +++ b/content/en/api/v1/service-level-objectives/request.CreateSLO_707861409.json @@ -0,0 +1,43 @@ +{ + "type": "metric", + "description": "Metric SLO using sli_specification", + "name": "Example-Service-Level-Objective", + "sli_specification": { + "count": { + "good_events_formula": { + "formula": "query1 - query2" + }, + "bad_events_formula": { + "formula": "query2" + }, + "queries": [ + { + "data_source": "metrics", + "name": "query1", + "query": "sum:httpservice.hits{*}.as_count()" + }, + { + "data_source": "metrics", + "name": "query2", + "query": "sum:httpservice.errors{*}.as_count()" + } + ] + } + }, + "tags": [ + "env:prod", + "type:count" + ], + "thresholds": [ + { + "target": 99.0, + "target_display": "99.0", + "timeframe": "7d", + "warning": 99.5, + "warning_display": "99.5" + } + ], + "timeframe": "7d", + "target_threshold": 99.0, + "warning_threshold": 99.5 +} \ No newline at end of file diff --git a/content/en/api/v2/observability-pipelines/examples.json b/content/en/api/v2/observability-pipelines/examples.json index fee19bb60cb..c78d8b8d878 100644 --- a/content/en/api/v2/observability-pipelines/examples.json +++ b/content/en/api/v2/observability-pipelines/examples.json @@ -63,6 +63,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -85,7 +86,7 @@ "totalCount": 42 } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

The schema data.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Chronicle.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud services (here, the Pub/Sub subscription this source consumes from).

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Metadata about the response.

\n
\n
\n
\n
\n
\n

totalCount

\n
\n

int64

\n

The total number of pipelines.

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

[object]

\n

The schema data.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • no_action: Let the event pass through.
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

meta

\n
\n

object

\n

Metadata about the response.

\n
\n
\n
\n
\n
\n

totalCount

\n
\n

int64

\n

The total number of pipelines.

\n
\n \n
\n
\n
\n
" }, "400": { "json": { @@ -181,6 +182,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -199,7 +201,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" }, "400": { "json": { @@ -349,6 +351,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -366,7 +369,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The delimiter character used to separate values in the encoded file.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Whether the encoded file includes a header row.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The column name in the enrichment table to match against.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The log field whose value is compared against the enrichment table column.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The name of the column defined in the schema.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The replacement string used in place of the matched sensitive data.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" } }, "ValidatePipeline": { @@ -526,6 +529,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -543,7 +547,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the OpenSearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline configuration.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" } }, "DeletePipeline": { @@ -650,6 +654,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -668,7 +673,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud services.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field that is received from the source should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" }, "403": { "json": { @@ -756,6 +761,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -774,7 +780,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that are added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud services.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field received from the source should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" }, "400": { "json": { @@ -933,6 +939,7 @@ ], "sources": [ { + "address_key": "DATADOG_AGENT_ADDRESS", "id": "datadog-agent-source", "tls": { "ca_file": "string", @@ -951,7 +958,7 @@ "type": "pipelines" } }, - "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline’s configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

the number of events allowed in a given time window. Events sent after the threshold has been reached, are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated, you should now use the processor_groups field.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud services such as Pub/Sub.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" + "html": "
\n
\n
\n
\n

data [required]

\n
\n

object

\n

Contains the pipeline’s ID, type, and configuration attributes.

\n
\n
\n
\n
\n
\n

attributes [required]

\n
\n

object

\n

Defines the pipeline’s name and its components (sources, processors, and destinations).

\n
\n
\n
\n
\n
\n

config [required]

\n
\n

object

\n

Specifies the pipeline's configuration, including its sources, processors, and destinations.

\n
\n
\n
\n
\n
\n

destinations [required]

\n
\n

[ <oneOf>]

\n

A list of destination components where processed logs are sent.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The http_client destination sends data to an HTTP endpoint.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

HTTP authentication strategy. \nAllowed enum values: none,basic,bearer

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for HTTP requests.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm. \nAllowed enum values: gzip

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URI.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_opensearch destination writes logs to Amazon OpenSearch.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth [required]

\n
\n

object

\n

Authentication settings for the Amazon OpenSearch destination.\nThe strategy field determines whether basic or AWS-based authentication is used.

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The ARN of the role to assume (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

aws_region

\n
\n

string

\n

AWS region

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

External ID for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

Session name for the assumed role (used with aws strategy).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be amazon_opensearch. \nAllowed enum values: amazon_opensearch

default: amazon_opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 destination sends your logs in Datadog-rehydratable format to an Amazon S3 bucket for archiving.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The amazon_s3_generic destination sends your logs to an Amazon S3 bucket.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

batch_settings

\n
\n

object

\n

Event batching settings

\n
\n
\n
\n
\n
\n

batch_size

\n
\n

int64

\n

Maximum batch size in bytes.

\n
\n \n
\n
\n
\n
\n
\n

timeout_secs

\n
\n

int64

\n

Maximum number of seconds to wait before flushing the batch.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

S3 bucket name.

\n
\n \n
\n
\n
\n
\n
\n

compression [required]

\n
\n

 <oneOf>

\n

Compression algorithm applied to encoded logs.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Zstd compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always zstd. \nAllowed enum values: zstd

default: zstd

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Zstd compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Gzip compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always gzip. \nAllowed enum values: gzip

default: gzip

\n
\n \n
\n
\n
\n
\n
\n

level [required]

\n
\n

int64

\n

Gzip compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Snappy compression.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

The compression type. Always snappy. \nAllowed enum values: snappy

default: snappy

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

 <oneOf>

\n

Encoding format for the destination.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

JSON encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always json. \nAllowed enum values: json

default: json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Parquet encoding.

\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The encoding type. Always parquet. \nAllowed enum values: parquet

default: parquet

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

S3 storage class. \nAllowed enum values: STANDARD,REDUCED_REDUNDANCY,INTELLIGENT_TIERING,STANDARD_IA,EXPRESS_ONEZONE,ONEZONE_IA,GLACIER,GLACIER_IR,DEEP_ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_s3_generic. \nAllowed enum values: amazon_s3_generic

default: amazon_s3_generic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The amazon_security_lake destination sends your logs to Amazon Security Lake.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the Amazon S3 bucket in Security Lake (3-63 characters).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

custom_source_name [required]

\n
\n

string

\n

Custom source name for the logs in Security Lake.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region of the S3 bucket.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always amazon_security_lake. \nAllowed enum values: amazon_security_lake

default: amazon_security_lake

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The azure_storage destination forwards logs to an Azure Blob Storage container.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

blob_prefix

\n
\n

string

\n

Optional prefix for blobs written to the container.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

connection_string_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure Storage connection string.

\n
\n \n
\n
\n
\n
\n
\n

container_name [required]

\n
\n

string

\n

The name of the Azure Blob Storage container to store logs in.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be azure_storage. \nAllowed enum values: azure_storage

default: azure_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The cloud_prem destination sends logs to Datadog CloudPrem.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CloudPrem endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be cloud_prem. \nAllowed enum values: cloud_prem

default: cloud_prem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The crowdstrike_next_gen_siem destination forwards logs to CrowdStrike Next Gen SIEM.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

compression

\n
\n

object

\n

Compression configuration for log events.

\n
\n
\n
\n
\n
\n

algorithm [required]

\n
\n

enum

\n

Compression algorithm for log events. \nAllowed enum values: gzip,zlib

\n
\n \n
\n
\n
\n
\n
\n

level

\n
\n

int64

\n

Compression level.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the CrowdStrike API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be crowdstrike_next_gen_siem. \nAllowed enum values: crowdstrike_next_gen_siem

default: crowdstrike_next_gen_siem

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The datadog_logs destination forwards logs to Datadog Log Management.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

routes

\n
\n

[object]

\n

A list of routing rules that forward matching logs to Datadog using dedicated API keys.

\n
\n
\n
\n
\n
\n

api_key_key

\n
\n

string

\n

Name of the environment variable or secret that stores the Datadog API key used by this route.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query that determines which logs are forwarded using this route.

\n
\n \n
\n
\n
\n
\n
\n

route_id

\n
\n

string

\n

Unique identifier for this route within the destination.

\n
\n \n
\n
\n
\n
\n
\n

site

\n
\n

string

\n

Datadog site where matching logs are sent (for example, us1).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_logs. \nAllowed enum values: datadog_logs

default: datadog_logs

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The elasticsearch destination writes logs to an Elasticsearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

api_version

\n
\n

enum

\n

The Elasticsearch API version to use. Set to auto to auto-detect. \nAllowed enum values: auto,v6,v7,v8

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to in Elasticsearch.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to Elasticsearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be elasticsearch. \nAllowed enum values: elasticsearch

default: elasticsearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The google_chronicle destination sends logs to Google Chronicle.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

customer_id [required]

\n
\n

string

\n

The Google Chronicle customer ID.

\n
\n \n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The encoding format for the logs sent to Chronicle. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Chronicle endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

log_type

\n
\n

string

\n

The log type metadata associated with the Chronicle destination.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_chronicle. \nAllowed enum values: google_chronicle

default: google_chronicle

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The google_cloud_storage destination stores logs in a Google Cloud Storage (GCS) bucket.\nIt requires a bucket name, Google Cloud authentication, and metadata fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

acl

\n
\n

enum

\n

Access control list setting for objects written to the bucket. \nAllowed enum values: private,project-private,public-read,authenticated-read,bucket-owner-read,bucket-owner-full-control

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

bucket [required]

\n
\n

string

\n

Name of the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the destination component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_prefix

\n
\n

string

\n

Optional prefix for object keys within the GCS bucket.

\n
\n \n
\n
\n
\n
\n
\n

metadata

\n
\n

[object]

\n

Custom metadata to attach to each object uploaded to the GCS bucket.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The metadata key.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The metadata value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

storage_class [required]

\n
\n

enum

\n

Storage class used for objects stored in GCS. \nAllowed enum values: STANDARD,NEARLINE,COLDLINE,ARCHIVE

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always google_cloud_storage. \nAllowed enum values: google_cloud_storage

default: google_cloud_storage

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The google_pubsub destination publishes logs to a Google Cloud Pub/Sub topic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Google Cloud Pub/Sub endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub topic.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Pub/Sub topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The kafka destination sends logs to Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

compression

\n
\n

enum

\n

Compression codec for Kafka messages. \nAllowed enum values: none,gzip,snappy,lz4,zstd

\n
\n \n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

headers_key

\n
\n

string

\n

The field name to use for Kafka message headers.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

key_field

\n
\n

string

\n

The field name to use as the Kafka message key.

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka producer configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

message_timeout_ms

\n
\n

int64

\n

Maximum time in milliseconds to wait for message delivery confirmation.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_duration_secs

\n
\n

int64

\n

Duration in seconds for the rate limit window.

\n
\n \n
\n
\n
\n
\n
\n

rate_limit_num

\n
\n

int64

\n

Maximum number of messages allowed per rate limit duration.

\n
\n \n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

socket_timeout_ms

\n
\n

int64

\n

Socket timeout in milliseconds for network requests.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topic [required]

\n
\n

string

\n

The Kafka topic name to publish logs to.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The microsoft_sentinel destination forwards logs to Microsoft Sentinel.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

client_id [required]

\n
\n

string

\n

Azure AD client ID used for authentication.

\n
\n \n
\n
\n
\n
\n
\n

client_secret_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Azure AD client secret.

\n
\n \n
\n
\n
\n
\n
\n

dce_uri_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Data Collection Endpoint (DCE) URI.

\n
\n \n
\n
\n
\n
\n
\n

dcr_immutable_id [required]

\n
\n

string

\n

The immutable ID of the Data Collection Rule (DCR).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

table [required]

\n
\n

string

\n

The name of the Log Analytics table where logs are sent.

\n
\n \n
\n
\n
\n
\n
\n

tenant_id [required]

\n
\n

string

\n

Azure AD tenant ID.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be microsoft_sentinel. \nAllowed enum values: microsoft_sentinel

default: microsoft_sentinel

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The new_relic destination sends logs to the New Relic platform.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

account_id_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic account ID.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

license_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the New Relic license key.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The New Relic region. \nAllowed enum values: us,eu

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be new_relic. \nAllowed enum values: new_relic

default: new_relic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opensearch destination writes logs to an OpenSearch cluster.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Authentication settings for the Elasticsearch destination.\nWhen strategy is basic, use username_key and password_key to reference credentials stored in environment variables or secrets.

\n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch password (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The authentication strategy to use. \nAllowed enum values: basic,aws

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Elasticsearch username (used when strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

bulk_index

\n
\n

string

\n

The index to write logs to.

\n
\n \n
\n
\n
\n
\n
\n

data_stream

\n
\n

object

\n

Configuration options for writing to OpenSearch Data Streams instead of a fixed index.

\n
\n
\n
\n
\n
\n

dataset

\n
\n

string

\n

The data stream dataset for your logs. This groups logs by their source or application.

\n
\n \n
\n
\n
\n
\n
\n

dtype

\n
\n

string

\n

The data stream type for your logs. This determines how logs are categorized within the data stream.

\n
\n \n
\n
\n
\n
\n
\n

namespace

\n
\n

string

\n

The data stream namespace for your logs. This separates logs into different environments or domains.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the OpenSearch endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be opensearch. \nAllowed enum values: opensearch

default: opensearch

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The rsyslog destination forwards logs to an external rsyslog server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sentinel_one destination sends logs to SentinelOne.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

enum

\n

The SentinelOne region to send logs to. \nAllowed enum values: us,eu,ca,data_set_us

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SentinelOne API token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sentinel_one. \nAllowed enum values: sentinel_one

default: sentinel_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The socket destination sends logs over TCP or UDP to a remote server.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the socket address (host:port).

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Each log event is delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingNewlineDelimitedMethod object. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Event data is not delimited at all.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingBytesMethod object. \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Each log event is separated using the specified delimiter character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used as a delimiter.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

The definition of ObservabilityPipelineSocketDestinationFramingCharacterDelimitedMethod object. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to send logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The splunk_hec destination forwards logs to Splunk using the HTTP Event Collector (HEC).

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auto_extract_timestamp

\n
\n

boolean

\n

If true, Splunk tries to extract timestamps from incoming log events.\nIf false, Splunk assigns the time the event was received.

\n
\n \n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

Encoding format for log events. \nAllowed enum values: json,raw_message

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

index

\n
\n

string

\n

Optional name of the Splunk index where logs are written.

\n
\n \n
\n
\n
\n
\n
\n

indexed_fields

\n
\n

[string]

\n

List of log field names to send as indexed fields to Splunk HEC. Available only when encoding is json.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

sourcetype

\n
\n

string

\n

The Splunk sourcetype to assign to log events.

\n
\n \n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Splunk HEC token.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The sumo_logic destination forwards logs to Sumo Logic.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

encoding

\n
\n

enum

\n

The output encoding format. \nAllowed enum values: json,raw_message,logfmt

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Sumo Logic HTTP endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

header_custom_fields

\n
\n

[object]

\n

A list of custom headers to include in the request to Sumo Logic.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The header field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The header field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

header_host_name

\n
\n

string

\n

Optional override for the host name header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_category

\n
\n

string

\n

Optional override for the source category header.

\n
\n \n
\n
\n
\n
\n
\n

header_source_name

\n
\n

string

\n

Optional override for the source name header.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 23

\n
\n

object

\n

The syslog_ng destination forwards logs to an external syslog-ng server over TCP or UDP using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

buffer

\n
\n

 <oneOf>

\n

Configuration for buffer settings on destination components.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Options for configuring a disk buffer.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the disk buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a disk buffer. \nAllowed enum values: disk

default: disk

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Options for configuring a memory buffer by byte size.

\n
\n
\n
\n
\n
\n

max_size [required]

\n
\n

int64

\n

Maximum size of the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Options for configuring a memory buffer by queue length.

\n
\n
\n
\n
\n
\n

max_events [required]

\n
\n

int64

\n

Maximum events for the memory buffer.

\n
\n \n
\n
\n
\n
\n
\n

type

\n
\n

enum

\n

The type of the buffer that will be configured, a memory buffer. \nAllowed enum values: memory

default: memory

\n
\n \n
\n
\n
\n
\n
\n

when_full

\n
\n

enum

\n

Behavior when the buffer is full (block and stop accepting new events, or drop new events) \nAllowed enum values: block,drop_newest

default: block

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the syslog-ng server endpoint URL.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

keepalive

\n
\n

int64

\n

Optional socket keepalive duration in milliseconds.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 24

\n
\n

object

\n

The datadog_metrics destination forwards metrics to Datadog.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of component IDs whose output is used as the input for this component.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The destination type. The value should always be datadog_metrics. \nAllowed enum values: datadog_metrics

default: datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pipeline_type

\n
\n

enum

\n

The type of data being ingested. Defaults to logs if not specified. \nAllowed enum values: logs,metrics

default: logs

\n
\n \n
\n
\n
\n
\n
\n

processor_groups

\n
\n

[object]

\n

A list of processor groups that transform or enrich log data.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

The encoding includes_headers.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, which is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionRedactOptions replace.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionHash options.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

processors

\n
\n

[object]

\n

DEPRECATED: A list of processor groups that transform or enrich log data.

Deprecated: This field is deprecated; use the processor_groups field instead.

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Whether this processor group is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for the processor group.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Conditional expression for when this processor group should execute.

\n
\n \n
\n
\n
\n
\n
\n

inputs [required]

\n
\n

[string]

\n

A list of IDs for components whose output is used as the input for this processor group.

\n
\n \n
\n
\n
\n
\n
\n

processors [required]

\n
\n

[ <oneOf>]

\n

Processors applied sequentially within this group. Events flow through each processor in order.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The filter processor allows conditional processing of logs/metrics based on a Datadog search query. Logs/metrics that match the include query are passed through; others are discarded.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs/metrics should pass through the filter. Logs/metrics that match this query continue to downstream components; others are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be filter. \nAllowed enum values: filter

default: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The add_env_vars processor adds environment variable values to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this processor in the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_env_vars. \nAllowed enum values: add_env_vars

default: add_env_vars

\n
\n \n
\n
\n
\n
\n
\n

variables [required]

\n
\n

[object]

\n

A list of environment variable mappings to apply to log fields.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The target field in the log event.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the environment variable to read.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The add_fields processor adds static key-value fields to logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of static fields (key-value pairs) that is added to each log event processed by this component.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_fields. \nAllowed enum values: add_fields

default: add_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The add_hostname processor adds the hostname to log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be add_hostname. \nAllowed enum values: add_hostname

default: add_hostname

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The custom_processor processor transforms events using Vector Remap Language (VRL) scripts with advanced filtering capabilities.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. This field should always be set to * for the custom_processor processor.

default: *

\n
\n \n
\n
\n
\n
\n
\n

remaps [required]

\n
\n

[object]

\n

Array of VRL remap rules.

\n
\n
\n
\n
\n
\n

drop_on_error [required]

\n
\n

boolean

\n

Whether to drop events that caused errors during processing.

\n
\n \n
\n
\n
\n
\n
\n

enabled

\n
\n

boolean

\n

Whether this remap rule is enabled.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to filter events for this specific remap rule.

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A descriptive name for this remap rule.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The VRL script source code that defines the processing logic.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be custom_processor. \nAllowed enum values: custom_processor

default: custom_processor

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The datadog_tags processor includes or excludes specific Datadog tags in your logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be datadog_tags. \nAllowed enum values: datadog_tags

default: datadog_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The dedupe processor removes duplicate fields in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

cache

\n
\n

object

\n

Configuration for the cache used to detect duplicates.

\n
\n
\n
\n
\n
\n

num_events [required]

\n
\n

int64

\n

The number of events to cache for duplicate detection.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of log field paths to check for duplicates.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The deduplication mode to apply to the fields. \nAllowed enum values: match,ignore

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be dedupe. \nAllowed enum values: dedupe

default: dedupe

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The enrichment_table processor enriches logs using a static CSV file, GeoIP database, or reference table. Exactly one of file, geoip, or reference_table must be configured.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

file

\n
\n

object

\n

Defines a static enrichment table loaded from a CSV file.

\n
\n
\n
\n
\n
\n

encoding [required]

\n
\n

object

\n

File encoding format.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

The encoding delimiter.

\n
\n \n
\n
\n
\n
\n
\n

includes_headers [required]

\n
\n

boolean

\n

Indicates whether the encoded file includes a header row (includes_headers).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Specifies the encoding format (e.g., CSV) used for enrichment tables. \nAllowed enum values: csv

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

key [required]

\n
\n

[object]

\n

Key fields used to look up enrichment values.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

comparison [required]

\n
\n

enum

\n

Defines how to compare key fields for enrichment table lookups. \nAllowed enum values: equals

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The items field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the CSV file.

\n
\n \n
\n
\n
\n
\n
\n

schema [required]

\n
\n

[object]

\n

Schema defining column names and their types.

\n
\n
\n
\n
\n
\n

column [required]

\n
\n

string

\n

The items column.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Declares allowed data types for enrichment table columns. \nAllowed enum values: string,boolean,integer,float,date,timestamp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

geoip

\n
\n

object

\n

Uses a GeoIP database to enrich logs based on an IP field.

\n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the IP field in the log.

\n
\n \n
\n
\n
\n
\n
\n

locale [required]

\n
\n

string

\n

Locale used to resolve geographical names.

\n
\n \n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

Path to the GeoIP database file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

reference_table

\n
\n

object

\n

Uses a Datadog reference table to enrich logs.

\n
\n
\n
\n
\n
\n

app_key_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Datadog application key used to access the reference table.

\n
\n \n
\n
\n
\n
\n
\n

columns

\n
\n

[string]

\n

List of column names to include from the reference table. If not provided, all columns are included.

\n
\n \n
\n
\n
\n
\n
\n

key_field [required]

\n
\n

string

\n

Path to the field in the log event to match against the reference table.

\n
\n \n
\n
\n
\n
\n
\n

table_id [required]

\n
\n

string

\n

The unique identifier of the reference table.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

string

\n

Path where enrichment results should be stored in the log.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be enrichment_table. \nAllowed enum values: enrichment_table

default: enrichment_table

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The generate_datadog_metrics processor creates custom metrics from logs and sends them to Datadog.\nMetrics can be counters, gauges, or distributions and optionally grouped by log fields.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

metrics

\n
\n

[object]

\n

Configuration for generating individual metrics.

\n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional fields used to group the metric series.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

Datadog filter query to match logs for metric generation.

\n
\n \n
\n
\n
\n
\n
\n

metric_type [required]

\n
\n

enum

\n

Type of metric to create. \nAllowed enum values: count,gauge,distribution

\n
\n \n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the custom metric to be created.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

 <oneOf>

\n

Specifies how the value of the generated metric is computed.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Strategy that increments a generated metric by one for each matching event.

\n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Increments the metric by 1 for each matching event. \nAllowed enum values: increment_by_one

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Strategy that increments a generated metric based on the value of a log field.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

Name of the log field containing the numeric value to increment the metric by.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

Uses a numeric field in the log event as the metric increment. \nAllowed enum values: increment_by_field

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. Always generate_datadog_metrics. \nAllowed enum values: generate_datadog_metrics

default: generate_datadog_metrics

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The ocsf_mapper processor transforms logs into the OCSF schema using a predefined mapping configuration.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used to reference this component in other parts of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

mappings [required]

\n
\n

[object]

\n

A list of mapping rules to convert events to the OCSF format.

\n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to select the logs that this mapping should apply to.

\n
\n \n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

 <oneOf>

\n

Defines a single mapping rule for transforming logs into the OCSF schema.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

enum

\n

Predefined library mappings for common log formats. \nAllowed enum values: CloudTrail Account Change,GCP Cloud Audit CreateBucket,GCP Cloud Audit CreateSink,GCP Cloud Audit SetIamPolicy,GCP Cloud Audit UpdateSink,Github Audit Log API Activity,Google Workspace Admin Audit addPrivilege,Microsoft 365 Defender Incident,Microsoft 365 Defender UserLoggedIn,Okta System Log Authentication

Show 1 more,Palo Alto Networks Firewall Traffic

\n
\n \n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Custom OCSF mapping configuration for transforming logs.

\n
\n
\n
\n
\n
\n

mapping [required]

\n
\n

[object]

\n

A list of field mapping rules for transforming log fields to OCSF schema fields.

\n
\n
\n
\n
\n
\n

default

\n
\n

\n

The default value to use if the source field is missing or empty.

\n
\n \n
\n
\n
\n
\n
\n

dest [required]

\n
\n

string

\n

The destination OCSF field path.

\n
\n \n
\n
\n
\n
\n
\n

lookup

\n
\n

object

\n

Lookup table configuration for mapping source values to destination values.

\n
\n \n
\n
\n
\n
\n
\n

source

\n
\n

\n

The source field path from the log event.

\n
\n \n
\n
\n
\n
\n
\n

sources

\n
\n

\n

Multiple source field paths for combined mapping.

\n
\n \n
\n
\n
\n
\n
\n

value

\n
\n

\n

A static value to use for the destination field.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

metadata [required]

\n
\n

object

\n

Metadata for the custom OCSF mapping.

\n
\n
\n
\n
\n
\n

class [required]

\n
\n

string

\n

The OCSF event class name.

\n
\n \n
\n
\n
\n
\n
\n

profiles

\n
\n

[string]

\n

A list of OCSF profiles to apply.

\n
\n \n
\n
\n
\n
\n
\n

version [required]

\n
\n

string

\n

The OCSF schema version.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

version [required]

\n
\n

int64

\n

The version of the custom mapping configuration.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be ocsf_mapper. \nAllowed enum values: ocsf_mapper

default: ocsf_mapper

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The parse_grok processor extracts structured fields from unstructured log messages using Grok patterns.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

disable_library_rules

\n
\n

boolean

\n

If set to true, disables the default Grok rules provided by Datadog.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

The list of Grok parsing rules. If multiple matching rules are provided, they are evaluated in order. The first successful match is applied.

\n
\n
\n
\n
\n
\n

match_rules [required]

\n
\n

[object]

\n

A list of Grok parsing rules that define how to extract fields from the source field.\nEach rule must contain a name and a valid Grok pattern.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The name of the field in the log event to apply the Grok rules to.

\n
\n \n
\n
\n
\n
\n
\n

support_rules

\n
\n

[object]

\n

A list of Grok helper rules that can be referenced by the parsing rules.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

The definition of the Grok helper rule.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_grok. \nAllowed enum values: parse_grok

default: parse_grok

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The parse_json processor extracts JSON from a specified field and flattens it into the event. This is useful when logs contain embedded JSON as a string.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains a JSON string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_json. \nAllowed enum values: parse_json

default: parse_json

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The parse_xml processor parses XML from a specified field and extracts it into the event.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

always_use_text_key

\n
\n

boolean

\n

Whether to always use a text key for element content.

\n
\n \n
\n
\n
\n
\n
\n

attr_prefix

\n
\n

string

\n

The prefix to use for XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The name of the log field that contains an XML string.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

include_attr

\n
\n

boolean

\n

Whether to include XML attributes in the parsed output.

\n
\n \n
\n
\n
\n
\n
\n

parse_bool

\n
\n

boolean

\n

Whether to parse boolean values from strings.

\n
\n \n
\n
\n
\n
\n
\n

parse_null

\n
\n

boolean

\n

Whether to parse null values.

\n
\n \n
\n
\n
\n
\n
\n

parse_number

\n
\n

boolean

\n

Whether to parse numeric values from strings.

\n
\n \n
\n
\n
\n
\n
\n

text_key

\n
\n

string

\n

The key name to use for text content within XML elements. Must be at least 1 character if specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be parse_xml. \nAllowed enum values: parse_xml

default: parse_xml

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The quota processor measures logging traffic for logs that match a specified filter. When the configured daily quota is met, the processor can drop or alert.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

drop_events

\n
\n

boolean

\n

If set to true, logs that match the quota filter and are sent after the quota is exceeded are dropped. Logs that do not match the filter continue through the pipeline. Note: You can set either drop_events or overflow_action, but not both.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

ignore_when_missing_partitions

\n
\n

boolean

\n

If true, the processor skips quota checks when partition fields are missing from the logs.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the quota.

\n
\n \n
\n
\n
\n
\n
\n

overflow_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

overrides

\n
\n

[object]

\n

A list of alternate quota rules that apply to specific sets of events, identified by matching field values. Each override can define a custom limit.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of field matchers used to apply a specific override. If an event matches all listed key-value pairs, the corresponding override limit is enforced.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The field name.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The field value.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

limit [required]

\n
\n

object

\n

The maximum amount of data or number of events allowed before the quota is enforced. Can be specified in bytes or events.

\n
\n
\n
\n
\n
\n

enforce [required]

\n
\n

enum

\n

Unit for quota enforcement in bytes for data size or events for count. \nAllowed enum values: bytes,events

\n
\n \n
\n
\n
\n
\n
\n

limit [required]

\n
\n

int64

\n

The limit for quota enforcement.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

partition_fields

\n
\n

[string]

\n

A list of fields used to segment log traffic for quota enforcement. Quotas are tracked independently by unique combinations of these field values.

\n
\n \n
\n
\n
\n
\n
\n

too_many_buckets_action

\n
\n

enum

\n

The action to take when the quota or bucket limit is exceeded. Options:

\n
    \n
  • drop: Drop the event.
  • \n
  • no_action: Let the event pass through.
  • \n
  • overflow_routing: Route to an overflow destination. \nAllowed enum values: drop,no_action,overflow_routing
  • \n
\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be quota. \nAllowed enum values: quota

default: quota

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The reduce processor aggregates and merges logs based on matching keys and merge strategies.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by [required]

\n
\n

[string]

\n

A list of fields used to group log events for merging.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

merge_strategies [required]

\n
\n

[object]

\n

List of merge strategies defining how values from grouped events should be combined.

\n
\n
\n
\n
\n
\n

path [required]

\n
\n

string

\n

The field path in the log event.

\n
\n \n
\n
\n
\n
\n
\n

strategy [required]

\n
\n

enum

\n

The merge strategy to apply. \nAllowed enum values: discard,retain,sum,max,min,array,concat,concat_newline,concat_raw,shortest_array

Show 2 more,longest_array,flat_unique

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be reduce. \nAllowed enum values: reduce

default: reduce

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The remove_fields processor deletes specified fields from logs.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

A list of field names to be removed from each log event.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be remove_fields. \nAllowed enum values: remove_fields

default: remove_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The rename_fields processor changes field names.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[object]

\n

A list of rename rules specifying which fields to rename in the event, what to rename them to, and whether to preserve the original fields.

\n
\n
\n
\n
\n
\n

destination [required]

\n
\n

string

\n

The field name to assign the renamed value to.

\n
\n \n
\n
\n
\n
\n
\n

preserve_source [required]

\n
\n

boolean

\n

Indicates whether the original field, that is received from the source, should be kept (true) or removed (false) after renaming.

\n
\n \n
\n
\n
\n
\n
\n

source [required]

\n
\n

string

\n

The original field name in the log event that should be renamed.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

A unique identifier for this component. Used to reference this component in other parts of the pipeline (e.g., as input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be rename_fields. \nAllowed enum values: rename_fields

default: rename_fields

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 18

\n
\n

object

\n

The sample processor allows probabilistic sampling of logs at a fixed rate.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields to group events by. Each group is sampled independently.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

percentage [required]

\n
\n

double

\n

The percentage of logs to sample.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sample. \nAllowed enum values: sample

default: sample

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 19

\n
\n

object

\n

The sensitive_data_scanner processor detects and optionally redacts sensitive data in log events.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for identifying and acting on sensitive data patterns.

\n
\n
\n
\n
\n
\n

keyword_options

\n
\n

object

\n

Configuration for keywords used to reinforce sensitive data pattern detection.

\n
\n
\n
\n
\n
\n

keywords [required]

\n
\n

[string]

\n

A list of keywords to match near the sensitive pattern.

\n
\n \n
\n
\n
\n
\n
\n

proximity [required]

\n
\n

int64

\n

Maximum number of tokens between a keyword and a sensitive value match.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

A name identifying the rule.

\n
\n \n
\n
\n
\n
\n
\n

on_match [required]

\n
\n

 <oneOf>

\n

Defines what action to take when sensitive data is matched.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Configuration for completely redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that completely replaces the matched sensitive data with a fixed replacement string to remove all visibility. \nAllowed enum values: redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Configuration for fully redacting sensitive data.

\n
\n
\n
\n
\n
\n

replace [required]

\n
\n

string

\n

The replacement string used in place of the matched sensitive data.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Configuration for hashing matched sensitive values.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that replaces the matched sensitive data with a hashed representation, preserving structure while securing content. \nAllowed enum values: hash

\n
\n \n
\n
\n
\n
\n
\n

options

\n
\n

object

\n

Options for the hash action applied to matched sensitive values.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Configuration for partially redacting matched sensitive data.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

Action type that redacts part of the sensitive data while preserving a configurable number of characters, typically used for masking purposes (e.g., show last 4 digits of a credit card). \nAllowed enum values: partial_redact

\n
\n \n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Controls how partial redaction is applied, including character count and direction.

\n
\n
\n
\n
\n
\n

characters [required]

\n
\n

int64

\n

The ObservabilityPipelineSensitiveDataScannerProcessorActionPartialRedactOptions characters.

\n
\n \n
\n
\n
\n
\n
\n

direction [required]

\n
\n

enum

\n

Indicates whether to redact characters from the first or last part of the matched value. \nAllowed enum values: first,last

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

pattern [required]

\n
\n

 <oneOf>

\n

Pattern detection configuration for identifying sensitive data using either a custom regex or a library reference.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Defines a custom regex-based pattern for identifying sensitive data in logs.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for defining a custom regex pattern.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

rule [required]

\n
\n

string

\n

A regular expression used to detect sensitive values. Must be a valid regex.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates a custom regular expression is used for matching. \nAllowed enum values: custom

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Specifies a pattern from Datadog’s sensitive data detection library to match known sensitive data types.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Options for selecting a predefined library pattern and enabling keyword support.

\n
\n
\n
\n
\n
\n

description

\n
\n

string

\n

Human-readable description providing context about a sensitive data scanner rule.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Identifier for a predefined pattern from the sensitive data scanner pattern library.

\n
\n \n
\n
\n
\n
\n
\n

use_recommended_keywords

\n
\n

boolean

\n

Whether to augment the pattern with recommended keywords (optional).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

Indicates that a predefined library pattern is used. \nAllowed enum values: library

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

scope [required]

\n
\n

 <oneOf>

\n

Determines which parts of the log the pattern-matching rule should be applied to.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Includes only specific fields for sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule only to included fields. \nAllowed enum values: include

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Excludes specific fields from sensitive data scanning.

\n
\n
\n
\n
\n
\n

options [required]

\n
\n

object

\n

Fields to which the scope rule applies.

\n
\n
\n
\n
\n
\n

fields [required]

\n
\n

[string]

\n

The ObservabilityPipelineSensitiveDataScannerProcessorScopeOptions fields.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Excludes specific fields from processing. \nAllowed enum values: exclude

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Applies scanning across all available fields.

\n
\n
\n
\n
\n
\n

target [required]

\n
\n

enum

\n

Applies the rule to all fields. \nAllowed enum values: all

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

tags [required]

\n
\n

[string]

\n

Tags assigned to this rule for filtering and classification.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be sensitive_data_scanner. \nAllowed enum values: sensitive_data_scanner

default: sensitive_data_scanner

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 20

\n
\n

object

\n

The split_array processor splits array fields into separate events based on configured rules.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

arrays [required]

\n
\n

[object]

\n

A list of array split configurations.

\n
\n
\n
\n
\n
\n

field [required]

\n
\n

string

\n

The path to the array field to split.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this array split operation targets.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets. For split_array, this should typically be *.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be split_array. \nAllowed enum values: split_array

default: split_array

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 21

\n
\n

object

\n

The throttle processor limits the number of events that pass through over a given time window.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

group_by

\n
\n

[string]

\n

Optional list of fields used to group events before the threshold has been reached.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this processor.

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which logs this processor targets.

\n
\n \n
\n
\n
\n
\n
\n

threshold [required]

\n
\n

int64

\n

The number of events allowed in a given time window. Events sent after the threshold has been reached are dropped.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be throttle. \nAllowed enum values: throttle

default: throttle

\n
\n \n
\n
\n
\n
\n
\n

window [required]

\n
\n

double

\n

The time window in seconds over which the threshold applies.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 22

\n
\n

object

\n

The metric_tags processor filters metrics based on their tags using Datadog tag key patterns.

Supported pipeline types: metrics

\n
\n
\n
\n
\n
\n

display_name

\n
\n

string

\n

The display name for a component.

\n
\n \n
\n
\n
\n
\n
\n

enabled [required]

\n
\n

boolean

\n

Indicates whether the processor is enabled.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query that determines which metrics the processor targets.

\n
\n \n
\n
\n
\n
\n
\n

rules [required]

\n
\n

[object]

\n

A list of rules for filtering metric tags.

\n
\n
\n
\n
\n
\n

action [required]

\n
\n

enum

\n

The action to take on tags with matching keys. \nAllowed enum values: include,exclude

\n
\n \n
\n
\n
\n
\n
\n

include [required]

\n
\n

string

\n

A Datadog search query used to determine which metrics this rule targets.

\n
\n \n
\n
\n
\n
\n
\n

keys [required]

\n
\n

[string]

\n

A list of tag keys to include or exclude.

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

The processing mode for tag filtering. \nAllowed enum values: filter

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The processor type. The value should always be metric_tags. \nAllowed enum values: metric_tags

default: metric_tags

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n
\n

sources [required]

\n
\n

[ <oneOf>]

\n

A list of configured data sources for the pipeline.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

The datadog_agent source collects logs/metrics from the Datadog Agent.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Datadog Agent source.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be datadog_agent. \nAllowed enum values: datadog_agent

default: datadog_agent

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

The amazon_data_firehose source ingests logs from AWS Data Firehose.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Firehose delivery stream address.

\n
\n \n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be amazon_data_firehose. \nAllowed enum values: amazon_data_firehose

default: amazon_data_firehose

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

The amazon_s3 source ingests logs from an Amazon S3 bucket.\nIt supports AWS authentication and TLS encryption.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

AWS authentication credentials used for accessing AWS services such as S3.\nIf omitted, the system’s default credentials are used (for example, the IAM role and environment variables).

\n
\n
\n
\n
\n
\n

assume_role

\n
\n

string

\n

The Amazon Resource Name (ARN) of the role to assume.

\n
\n \n
\n
\n
\n
\n
\n

external_id

\n
\n

string

\n

A unique identifier for cross-account role assumption.

\n
\n \n
\n
\n
\n
\n
\n

session_name

\n
\n

string

\n

A session identifier used for logging and tracing the assumed role session.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

region [required]

\n
\n

string

\n

AWS region where the S3 bucket resides.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always amazon_s3. \nAllowed enum values: amazon_s3

default: amazon_s3

\n
\n \n
\n
\n
\n
\n
\n

url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the S3 bucket URL.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

The fluent_bit source ingests logs from Fluent Bit.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent Bit receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluent_bit. \nAllowed enum values: fluent_bit

default: fluent_bit

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

The fluentd source ingests logs from a Fluentd-compatible service.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Fluent receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be fluentd. \nAllowed enum values: fluentd

default: fluentd

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 6

\n
\n

object

\n

The google_pubsub source ingests logs from a Google Cloud Pub/Sub subscription.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth

\n
\n

object

\n

Google Cloud credentials used to authenticate with Google Cloud Storage.

\n
\n
\n
\n
\n
\n

credentials_file [required]

\n
\n

string

\n

Path to the Google Cloud service account key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

project [required]

\n
\n

string

\n

The Google Cloud project ID that owns the Pub/Sub subscription.

\n
\n \n
\n
\n
\n
\n
\n

subscription [required]

\n
\n

string

\n

The Pub/Sub subscription name from which messages are consumed.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be google_pubsub. \nAllowed enum values: google_pubsub

default: google_pubsub

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 7

\n
\n

object

\n

The http_client source scrapes logs from HTTP endpoints at regular intervals.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

auth_strategy

\n
\n

enum

\n

Optional authentication strategy for HTTP requests. \nAllowed enum values: none,basic,bearer,custom

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

endpoint_url_key

\n
\n

string

\n

Name of the environment variable or secret that holds the HTTP endpoint URL to scrape.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n

scrape_interval_secs

\n
\n

int64

\n

The interval (in seconds) between HTTP scrape requests.

\n
\n \n
\n
\n
\n
\n
\n

scrape_timeout_secs

\n
\n

int64

\n

The timeout (in seconds) for each scrape request.

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

token_key

\n
\n

string

\n

Name of the environment variable or secret that holds the bearer token (used when auth_strategy is bearer).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_client. \nAllowed enum values: http_client

default: http_client

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is basic).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 8

\n
\n

object

\n

The http_server source collects logs over HTTP POST from external services.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HTTP server.

\n
\n \n
\n
\n
\n
\n
\n

auth_strategy [required]

\n
\n

enum

\n

HTTP authentication method. \nAllowed enum values: none,plain

\n
\n \n
\n
\n
\n
\n
\n

custom_key

\n
\n

string

\n

Name of the environment variable or secret that holds a custom header value (used with custom auth strategies).

\n
\n \n
\n
\n
\n
\n
\n

decoding [required]

\n
\n

enum

\n

The decoding format used to interpret incoming logs. \nAllowed enum values: bytes,gelf,json,syslog

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique ID for the HTTP server source.

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the password (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be http_server. \nAllowed enum values: http_server

default: http_server

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the username (used when auth_strategy is plain).

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 9

\n
\n

object

\n

The kafka source ingests data from Apache Kafka topics.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

bootstrap_servers_key

\n
\n

string

\n

Name of the environment variable or secret that holds the Kafka bootstrap servers list.

\n
\n \n
\n
\n
\n
\n
\n

group_id [required]

\n
\n

string

\n

Consumer group ID used by the Kafka client.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

librdkafka_options

\n
\n

[object]

\n

Optional list of advanced Kafka client configuration options, defined as key-value pairs.

\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

The name of the librdkafka configuration option to set.

\n
\n \n
\n
\n
\n
\n
\n

value [required]

\n
\n

string

\n

The value assigned to the specified librdkafka configuration option.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

sasl

\n
\n

object

\n

Specifies the SASL mechanism for authenticating with a Kafka cluster.

\n
\n
\n
\n
\n
\n

mechanism

\n
\n

enum

\n

SASL mechanism used for Kafka authentication. \nAllowed enum values: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512

\n
\n \n
\n
\n
\n
\n
\n

password_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL password.

\n
\n \n
\n
\n
\n
\n
\n

username_key

\n
\n

string

\n

Name of the environment variable or secret that holds the SASL username.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

topics [required]

\n
\n

[string]

\n

A list of Kafka topic names to subscribe to. The source ingests messages from each topic specified.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be kafka. \nAllowed enum values: kafka

default: kafka

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 10

\n
\n

object

\n

The logstash source ingests logs from a Logstash forwarder.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Logstash receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be logstash. \nAllowed enum values: logstash

default: logstash

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 11

\n
\n

object

\n

The rsyslog source listens for logs over TCP or UDP from an rsyslog server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be rsyslog. \nAllowed enum values: rsyslog

default: rsyslog

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 12

\n
\n

object

\n

The socket source ingests logs over TCP or UDP.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the socket.

\n
\n \n
\n
\n
\n
\n
\n

framing [required]

\n
\n

 <oneOf>

\n

Framing method configuration for the socket source.

\n
\n
\n
\n
\n
\n

Option 1

\n
\n

object

\n

Byte frames which are delimited by a newline character.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a newline character. \nAllowed enum values: newline_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 2

\n
\n

object

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments).

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames are passed through as-is according to the underlying I/O boundaries (for example, split between messages or stream segments). \nAllowed enum values: bytes

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 3

\n
\n

object

\n

Byte frames which are delimited by a chosen character.

\n
\n
\n
\n
\n
\n

delimiter [required]

\n
\n

string

\n

A single ASCII character used to delimit events.

\n
\n \n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are delimited by a chosen character. \nAllowed enum values: character_delimited

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 4

\n
\n

object

\n

Byte frames according to the octet counting format as per RFC6587.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames according to the octet counting format as per RFC6587. \nAllowed enum values: octet_counting

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 5

\n
\n

object

\n

Byte frames which are chunked GELF messages.

\n
\n
\n
\n
\n
\n

method [required]

\n
\n

enum

\n

Byte frames which are chunked GELF messages. \nAllowed enum values: chunked_gelf

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used to receive logs. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

TLS configuration. Relevant only when mode is tcp.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be socket. \nAllowed enum values: socket

default: socket

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 13

\n
\n

object

\n

The splunk_hec source implements the Splunk HTTP Event Collector (HEC) API.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the HEC API.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_hec. \nAllowed enum values: splunk_hec

default: splunk_hec

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 14

\n
\n

object

\n

The splunk_tcp source receives logs from a Splunk Universal Forwarder over TCP.\nTLS is supported for secure transmission.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Splunk TCP receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. Always splunk_tcp. \nAllowed enum values: splunk_tcp

default: splunk_tcp

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 15

\n
\n

object

\n

The sumo_logic source receives logs from Sumo Logic collectors.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the Sumo Logic receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be sumo_logic. \nAllowed enum values: sumo_logic

default: sumo_logic

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 16

\n
\n

object

\n

The syslog_ng source listens for logs over TCP or UDP from a syslog-ng server using the syslog protocol.

Supported pipeline types: logs

\n
\n
\n
\n
\n
\n

address_key

\n
\n

string

\n

Name of the environment variable or secret that holds the listen address for the syslog-ng receiver.

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

mode [required]

\n
\n

enum

\n

Protocol used by the syslog source to receive messages. \nAllowed enum values: tcp,udp

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be syslog_ng. \nAllowed enum values: syslog_ng

default: syslog_ng

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

Option 17

\n
\n

object

\n

The opentelemetry source receives telemetry data using the OpenTelemetry Protocol (OTLP) over gRPC and HTTP.

Supported pipeline types: logs, metrics

\n
\n
\n
\n
\n
\n

grpc_address_key

\n
\n

string

\n

Environment variable name containing the gRPC server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

http_address_key

\n
\n

string

\n

Environment variable name containing the HTTP server address for receiving OTLP data. Must be a valid environment variable name (alphanumeric characters and underscores only).

\n
\n \n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the input to downstream components).

\n
\n \n
\n
\n
\n
\n
\n

tls

\n
\n

object

\n

Configuration for enabling TLS encryption between the pipeline component and external services.

\n
\n
\n
\n
\n
\n

ca_file

\n
\n

string

\n

Path to the Certificate Authority (CA) file used to validate the server’s TLS certificate.

\n
\n \n
\n
\n
\n
\n
\n

crt_file [required]

\n
\n

string

\n

Path to the TLS client certificate file used to authenticate the pipeline component with upstream or downstream services.

\n
\n \n
\n
\n
\n
\n
\n

key_file

\n
\n

string

\n

Path to the private key file associated with the TLS client certificate. Used for mutual TLS authentication.

\n
\n \n
\n
\n
\n
\n
\n

key_pass_key

\n
\n

string

\n

Name of the environment variable or secret that holds the passphrase for the private key file.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

type [required]

\n
\n

enum

\n

The source type. The value should always be opentelemetry. \nAllowed enum values: opentelemetry

default: opentelemetry

\n
\n \n
\n
\n
\n
\n
\n
\n
\n
\n
\n

use_legacy_search_syntax

\n
\n

boolean

\n

Set to true to continue using the legacy search syntax while migrating filter queries. After migrating all queries to the new syntax, set to false.\nThe legacy syntax is deprecated and will eventually be removed.\nRequires Observability Pipelines Worker 2.11 or later.\nSee Upgrade Your Filter Queries to the New Search Syntax for more information.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

name [required]

\n
\n

string

\n

Name of the pipeline.

\n
\n \n
\n
\n
\n
\n
\n
\n
\n

id [required]

\n
\n

string

\n

Unique identifier for the pipeline.

\n
\n \n
\n
\n
\n
\n
\n

type [required]

\n
\n

string

\n

The resource type identifier. For pipeline resources, this should always be set to pipelines.

default: pipelines

\n
\n \n
\n
\n
\n
" } } } \ No newline at end of file diff --git a/data/api/v1/CodeExamples.json b/data/api/v1/CodeExamples.json index cc8ecfccfa9..2d61f11e4c5 100644 --- a/data/api/v1/CodeExamples.json +++ b/data/api/v1/CodeExamples.json @@ -928,6 +928,11 @@ } ], "CreateSLO": [ + { + "group": "service_level_objectives", + "suffix": "_707861409", + "description": "Create a new metric SLO object using bad events formula returns \"OK\" response" + }, { "group": "service_level_objectives", "suffix": "_512760759", diff --git a/data/api/v1/full_spec.yaml b/data/api/v1/full_spec.yaml index 522d13e6887..5af99828fa2 100644 --- a/data/api/v1/full_spec.yaml +++ b/data/api/v1/full_spec.yaml @@ -12445,33 +12445,73 @@ components: type: object SLOCountDefinition: description: 'A count-based (metric) SLI specification, composed of three parts: - the good events formula, the total events formula, + the good events formula, - and the underlying queries.' + the bad or total events formula, and the underlying queries. + + Exactly one of `total_events_formula` or `bad_events_formula` must be provided.' example: - good_events_formula: query1 - query2 + bad_events_formula: query2 + good_events_formula: query1 queries: - data_source: metrics name: query1 - query: sum:trace.servlet.request.hits{*} by {env}.as_count() + query: sum:trace.servlet.request.hits{!http.status_code:500} by {env}.as_count() - data_source: metrics name: query2 - query: sum:trace.servlet.request.errors{*} by {env}.as_count() - total_events_formula: query1 + query: sum:trace.servlet.request.hits{http.status_code:500} by {env}.as_count() + oneOf: + - $ref: '#/components/schemas/SLOCountDefinitionWithTotalEventsFormula' + - $ref: '#/components/schemas/SLOCountDefinitionWithBadEventsFormula' + SLOCountDefinitionWithBadEventsFormula: + additionalProperties: false properties: + bad_events_formula: + $ref: '#/components/schemas/SLOFormula' + description: The bad events formula (recommended). 
Total events queries + can be defined using the `total_events_formula` field as an alternative. + Only one of `total_events_formula` or `bad_events_formula` must be provided. good_events_formula: $ref: '#/components/schemas/SLOFormula' queries: example: - data_source: metrics name: query1 - query: sum:trace.servlet.request.hits{*} by {env}.as_count() + query: sum:trace.servlet.request.hits{!http.status_code:500} by {env}.as_count() + - data_source: metrics + name: query2 + query: sum:trace.servlet.request.hits{http.status_code:500} by {env}.as_count() + items: + $ref: '#/components/schemas/SLODataSourceQueryDefinition' + minItems: 1 + type: array + required: + - good_events_formula + - bad_events_formula + - queries + type: object + SLOCountDefinitionWithTotalEventsFormula: + additionalProperties: false + properties: + good_events_formula: + $ref: '#/components/schemas/SLOFormula' + queries: + example: + - data_source: metrics + name: query1 + query: sum:trace.servlet.request.hits{!http.status_code:500} by {env}.as_count() + - data_source: metrics + name: query2 + query: sum:trace.servlet.request.hits{http.status_code:500} by {env}.as_count() items: $ref: '#/components/schemas/SLODataSourceQueryDefinition' minItems: 1 type: array total_events_formula: $ref: '#/components/schemas/SLOFormula' + description: The total events formula. Bad events queries can be defined + using the `bad_events_formula` field as an alternative. Only one of `total_events_formula` + or `bad_events_formula` must be provided. required: - good_events_formula - total_events_formula @@ -12482,15 +12522,15 @@ components: description: A metric SLI specification. 
example: count: - good_events_formula: query1 - query2 + bad_events_formula: query2 + good_events_formula: query1 queries: - data_source: metrics name: query1 - query: sum:trace.servlet.request.hits{*} by {env}.as_count() + query: sum:trace.servlet.request.hits{!http.status_code:500} by {env}.as_count() - data_source: metrics name: query2 - query: sum:trace.servlet.request.errors{*} by {env}.as_count() - total_events_formula: query1 + query: sum:trace.servlet.request.hits{http.status_code:500} by {env}.as_count() properties: count: $ref: '#/components/schemas/SLOCountDefinition' @@ -13626,7 +13666,7 @@ components: name: query1 query: sum:trace.servlet.request.hits{*} by {env}.as_count() - data_source: metrics - name: query1 + name: query2 query: sum:trace.servlet.request.errors{*} by {env}.as_count() threshold: 5 properties: diff --git a/data/api/v2/full_spec.yaml b/data/api/v2/full_spec.yaml index be475477bd8..04bb85e51f1 100644 --- a/data/api/v2/full_spec.yaml +++ b/data/api/v2/full_spec.yaml @@ -43147,6 +43147,11 @@ components: **Supported pipeline types:** logs, metrics' properties: + address_key: + description: Name of the environment variable or secret that holds the listen + address for the Datadog Agent source. + example: DATADOG_AGENT_ADDRESS + type: string id: description: The unique identifier for this component. Used in other parts of the pipeline to reference this component (for example, as the `input`