From c7d59e18b27b945e384284057938ea16d4ddf1d9 Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Mon, 16 Mar 2026 10:58:30 -0400 Subject: [PATCH 01/11] Moves Experimentation documentation from /product_analytics/experimentation/ to /experiments/, add aliases and redirect old URLs, moves Experiments to top navigation --- config/_default/menus/main.en.yaml | 23 ++++++------ .../experimentation => experiments}/_index.md | 36 ++++++------------- .../defining_metrics.md | 19 ++++------ .../minimum_detectable_effect.md | 2 ++ .../reading_results.md | 13 +++---- .../troubleshooting.md | 2 ++ 6 files changed, 37 insertions(+), 58 deletions(-) rename content/en/{product_analytics/experimentation => experiments}/_index.md (81%) rename content/en/{product_analytics/experimentation => experiments}/defining_metrics.md (93%) rename content/en/{product_analytics/experimentation => experiments}/minimum_detectable_effect.md (97%) rename content/en/{product_analytics/experimentation => experiments}/reading_results.md (85%) rename content/en/{product_analytics/experimentation => experiments}/troubleshooting.md (99%) diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index 21f512e55cf..8917ffc4713 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -8840,30 +8840,31 @@ menu: identifier: pa_profiles weight: 4 - name: Experiments - url: product_analytics/experimentation/ - parent: product_analytics + url: experiments + pre: experiment-wui + parent: digital_experience_heading identifier: pa_experiments - weight: 5 + weight: 50000 - name: Define Metrics - url: product_analytics/experimentation/defining_metrics + url: experiments/defining_metrics parent: pa_experiments identifier: pa_experiments_metrics - weight: 501 + weight: 1 - name: Reading Experiment Results - url: product_analytics/experimentation/reading_results + url: experiments/reading_results parent: pa_experiments identifier: pa_experiments_results - 
weight: 502 + weight: 2 - name: Minimum Detectable Effects - url: product_analytics/experimentation/minimum_detectable_effect + url: experiments/minimum_detectable_effect parent: pa_experiments identifier: pa_experiments_mde - weight: 503 + weight: 3 - name: Troubleshooting - url: product_analytics/experimentation/troubleshooting + url: experiments/troubleshooting parent: pa_experiments identifier: pa_experiments_troubleshooting - weight: 504 + weight: 4 - name: Guides url: product_analytics/guide/ parent: product_analytics diff --git a/content/en/product_analytics/experimentation/_index.md b/content/en/experiments/_index.md similarity index 81% rename from content/en/product_analytics/experimentation/_index.md rename to content/en/experiments/_index.md index 015e2fffb7e..d945e6cc64f 100644 --- a/content/en/product_analytics/experimentation/_index.md +++ b/content/en/experiments/_index.md @@ -1,11 +1,13 @@ --- title: Planning and Launching Experiments -description: Experimentation allows you to measure the causal relationship new experiences or features have on user outcomes. +description: Use Datadog Experiments to measure the causal relationship new experiences or features have on user outcomes. +aliases: + - /product_analytics/experimentation/ further_reading: - link: "https://www.datadoghq.com/blog/datadog-product-analytics" tag: "Blog" text: "Make data-driven design decisions with Product Analytics" -- link: "/product_analytics/experimentation/defining_metrics" +- link: "/experiments/defining_metrics" tag: "Documentation" text: "Defining Experiment Metrics" --- @@ -15,11 +17,10 @@ Datadog Experiments is in Preview. Complete the form to request access. {{< /callout >}} ## Overview -Datadog Experimentation allows you to measure the causal relationship that new experiences and features have on user outcomes. To do this, Experimentation randomly allocates traffic between two or more variations, using one of the variations as a control group. 
+Datadog Experiments allows you to measure the causal relationship that new experiences and features have on user outcomes. Experiments uses [Feature flags][4] to randomly allocate traffic between two or more variations, using one of the variations as a control group. This page walks you through planning and launching your experiments. - ## Setup To create, configure, and launch your experiment, complete the following steps: @@ -31,14 +32,12 @@ To create, configure, and launch your experiment, complete the following steps: {{< img src="/product_analytics/experiment/exp_create_experiment.png" alt="create an experiment and add a hypothesis for the experiment." style="width:80%;" >}} - ### Step 2 - Add metrics After you’ve created an experiment, add your primary metric and optional guardrails. See [Defining Metrics][2] for details on how to create metrics. {{< img src="/product_analytics/experiment/exp_decision_metrics1.png" alt="create an experiment and add a hypothesis for the experiment." style="width:80%;" >}} - #### Add a sample size calculation (optional) After selecting your experiment’s metrics, use the optional sample size calculator to determine how small of a change your experiment can reliably detect with your current sample size. @@ -58,34 +57,19 @@ After specifying your metrics, you can launch your experiment. 1. Click **Set up experiment on feature flag** to specify how you want to roll out your experiment. You can either launch the experiment to all traffic, or schedule a gradual rollout. - {{< img src="/product_analytics/experiment/exp_feature_flag.png" alt="Set up an experiment on a Feature Flag." style="width:90%;" >}} - ## Next steps -1. **[Defining metrics][2]**: Define the metrics you want to measure during your experimentation. -1. **[Reading Experiment Results][5]**: Review and explore your Experiment results. +1. **[Defining metrics][2]**: Define the metrics you want to measure during your experiments. +1. 
**[Reading Experiment Results][5]**: Review and explore your experiment results. 1. Learn more about **[Minimum Detectable Effects][3]**: Choose an appropriately sized MDE. - - - - - - - - - - - - - ## Further reading {{< partial name="whats-next/whats-next.html" >}} [1]: https://app.datadoghq.com/product-analytics/experiments -[2]: /product_analytics/experimentation/defining_metrics -[3]: /product_analytics/experimentation/minimum_detectable_effect +[2]: /experiments/defining_metrics +[3]: /experiments/minimum_detectable_effect [4]: /getting_started/feature_flags/ -[5]: /product_analytics/experimentation/reading_results +[5]: /experiments/reading_results diff --git a/content/en/product_analytics/experimentation/defining_metrics.md b/content/en/experiments/defining_metrics.md similarity index 93% rename from content/en/product_analytics/experimentation/defining_metrics.md rename to content/en/experiments/defining_metrics.md index 7181f86943f..7432f342832 100644 --- a/content/en/product_analytics/experimentation/defining_metrics.md +++ b/content/en/experiments/defining_metrics.md @@ -1,18 +1,20 @@ --- title: Defining Metrics -description: Define the metrics you want to measure during your experimentation. +description: Define the metrics you want to measure during your experiments. +aliases: + - /product_analytics/experimentation/defining_metrics/ further_reading: - link: "https://www.datadoghq.com/blog/datadog-product-analytics/" tag: "Blog" text: "Make data-driven design decisions with Product Analytics" -- link: "/product_analytics/experimentation/reading_results" +- link: "/experiments/reading_results" tag: "Documentation" text: "Reading Experiment Results" --- ## Overview -Define the metrics you want to measure during your experimentation. Metrics can be built using Product Analytics and Real User Monitoring (RUM) data. +Define the metrics you want to measure during your experiments. 
Metrics can be built using Product Analytics and Real User Monitoring (RUM) data.
In order to create a metric, you must have Datadog’s client SDK installed in your application and be actively capturing data.
@@ -35,7 +37,6 @@ After you’ve selected your event of interest, you can specify an aggregation m {{< img src="/product_analytics/experiment/exp_default_metric_agg.png" alt="Dropdown menu to select the method of aggregation for metrics." style="width:90%;" >}} - ### Default metric normalization All metrics are normalized by the number of enrolled subjects. For example, a **count of unique users** metric is computed as: @@ -68,17 +69,13 @@ For example, an e-commerce company that wants to measure the _Average Order Valu Datadog’s statistical engine accounts for correlations between the numerator and denominator using the [delta method][2]. - ## Add filters You can also add filters to your metrics, similar to other [Product Analytics dashboards][3]. For instance, you might want to filter page views based on referring URL or UTM parameters. Similarly, you might want to filter actions to a specific page or value of a custom attribute. As you add filters, you can check metric values in real time using the chart on the right. - {{< img src="/product_analytics/experiment/exp_filter_by.png" alt="Filter flow to scope your metric by specific properties." style="width:90%;" >}} - - ## Advanced options -Datadog supports several advanced options specific to experimentation: +Datadog supports several advanced options specific to Experiments: `Timeframe filters` : - By default, Datadog will include all events between a user's first exposure and the end of the experiment. If you want to measure a time-boxed value such as “sessions within 7 days”, you can add a timeframe filter. @@ -92,10 +89,6 @@ Datadog supports several advanced options specific to experimentation: : - Real world data often includes extreme outliers that can impact experiment results. - Use this setting to set a threshold at which data is truncated. For instance, set a 99% upper bound to truncate all results at the metric’s 99th percentile. 
- - - - ## Further reading {{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/product_analytics/experimentation/minimum_detectable_effect.md b/content/en/experiments/minimum_detectable_effect.md similarity index 97% rename from content/en/product_analytics/experimentation/minimum_detectable_effect.md rename to content/en/experiments/minimum_detectable_effect.md index 9c177facaff..7ae352e70d1 100644 --- a/content/en/product_analytics/experimentation/minimum_detectable_effect.md +++ b/content/en/experiments/minimum_detectable_effect.md @@ -1,6 +1,8 @@ --- title: Minimum Detectable Effects description: Determine the smallest detectable difference that may result in a statistically significant experiment result. +aliases: + - /product_analytics/experimentation/minimum_detectable_effect/ further_reading: - link: "https://www.datadoghq.com/blog/datadog-product-analytics/" tag: "Blog" diff --git a/content/en/product_analytics/experimentation/reading_results.md b/content/en/experiments/reading_results.md similarity index 85% rename from content/en/product_analytics/experimentation/reading_results.md rename to content/en/experiments/reading_results.md index 43d395b3b2c..72e3dc6e58d 100644 --- a/content/en/product_analytics/experimentation/reading_results.md +++ b/content/en/experiments/reading_results.md @@ -1,6 +1,8 @@ --- title: Reading Experiment Results -description: Read and understand the results of your Experimentation. +description: Read and understand the results of your experiments. +aliases: + - /product_analytics/experimentation/reading_results/ further_reading: - link: "https://www.datadoghq.com/blog/datadog-product-analytics/" tag: "Blog" @@ -12,7 +14,7 @@ further_reading: ## Overview -After [launching your experiment][1], Datadog immediately begins calculating results for your selected metrics. 
You can add additional metrics at any time, organize metrics into groups, and explore related user sessions to understand the impact of each variant. +After [launching your experiment][1], Datadog begins calculating results for your selected metrics. You can add additional metrics, organize metrics into groups, and explore related user sessions to understand the impact of each variant. {{< img src="/product_analytics/experiment/exp_reading_exps_overview.png" alt="A view of the metrics and their variations in the control and experiment groups ." style="width:90%;" >}} @@ -34,18 +36,13 @@ If the entire confidence interval is above zero, then the result is statisticall ## Exploring results To dive deeper into experiment results, hover over a metric and click **Chart**. This gives you the option to compare the experiment’s impact across different user segments. - ### Segment-level results Subject level properties are based on attributes at the initial time of exposure (for example, region, new vistor vs repeat visitor etc.). This is useful for understanding when certain cohorts of users reacted differently to the new experience. - {{< img src="/product_analytics/experiment/exp_segment_view.png" alt="Segment-level view of metric 'click on ADD TO CART' split by four country ISO code." 
style="width:90%;" >}} - - - ## Further reading {{< partial name="whats-next/whats-next.html" >}} -[1]: /product_analytics/experimentation/ \ No newline at end of file +[1]: /experiments/ \ No newline at end of file diff --git a/content/en/product_analytics/experimentation/troubleshooting.md b/content/en/experiments/troubleshooting.md similarity index 99% rename from content/en/product_analytics/experimentation/troubleshooting.md rename to content/en/experiments/troubleshooting.md index ee5bb71807b..631ecf7486d 100644 --- a/content/en/product_analytics/experimentation/troubleshooting.md +++ b/content/en/experiments/troubleshooting.md @@ -1,6 +1,8 @@ --- title: Troubleshooting description: Troubleshoot issues when running experiments. +aliases: + - /product_analytics/experimentation/troubleshooting/ further_reading: - link: "https://www.datadoghq.com/blog/datadog-product-analytics" tag: "Blog" From ebee1635df784da1194e5931267e27f1c4e9ad4c Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Mon, 16 Mar 2026 11:38:57 -0400 Subject: [PATCH 02/11] modified sentence and fixed legacy typo --- content/en/experiments/defining_metrics.md | 2 +- content/en/experiments/reading_results.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/content/en/experiments/defining_metrics.md b/content/en/experiments/defining_metrics.md index 7432f342832..50b234d2530 100644 --- a/content/en/experiments/defining_metrics.md +++ b/content/en/experiments/defining_metrics.md @@ -75,7 +75,7 @@ You can also add filters to your metrics, similar to other [Product Analytics da {{< img src="/product_analytics/experiment/exp_filter_by.png" alt="Filter flow to scope your metric by specific properties." 
style="width:90%;" >}} ## Advanced options -Datadog supports several advanced options specific to Experiments: +Datadog Experiments supports several advanced options: `Timeframe filters` : - By default, Datadog will include all events between a user's first exposure and the end of the experiment. If you want to measure a time-boxed value such as “sessions within 7 days”, you can add a timeframe filter. diff --git a/content/en/experiments/reading_results.md b/content/en/experiments/reading_results.md index 72e3dc6e58d..c01753177fd 100644 --- a/content/en/experiments/reading_results.md +++ b/content/en/experiments/reading_results.md @@ -37,7 +37,7 @@ If the entire confidence interval is above zero, then the result is statisticall To dive deeper into experiment results, hover over a metric and click **Chart**. This gives you the option to compare the experiment’s impact across different user segments. ### Segment-level results -Subject level properties are based on attributes at the initial time of exposure (for example, region, new vistor vs repeat visitor etc.). This is useful for understanding when certain cohorts of users reacted differently to the new experience. +Subject level properties are based on attributes at the initial time of exposure (for example, region, new visitor vs repeat visitor). This is useful for understanding when certain cohorts of users reacted differently to the new experience. {{< img src="/product_analytics/experiment/exp_segment_view.png" alt="Segment-level view of metric 'click on ADD TO CART' split by four country ISO code." style="width:90%;" >}} From 436bfa7449ed5d6088de41fedb9206f76c7226e2 Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Mon, 16 Mar 2026 11:40:17 -0400 Subject: [PATCH 03/11] sentence change. 
--- content/en/experiments/defining_metrics.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/experiments/defining_metrics.md b/content/en/experiments/defining_metrics.md index 50b234d2530..f4f9cca72bb 100644 --- a/content/en/experiments/defining_metrics.md +++ b/content/en/experiments/defining_metrics.md @@ -16,7 +16,7 @@ further_reading: Define the metrics you want to measure during your experiments. Metrics can be built using Product Analytics and Real User Monitoring (RUM) data. -
In order to create a metric, you must have Datadog’s client SDK installed in your application and be actively capturing data. +
To create a metric, you must have Datadog’s client SDK installed in your application and be actively capturing data.
## Create your first metric From 9a4f5c9c379a264dd94c1499c8e9ad015c25e492 Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Mon, 16 Mar 2026 15:16:46 -0400 Subject: [PATCH 04/11] quick fixes --- config/_default/menus/main.en.yaml | 4 ++-- content/en/experiments/_index.md | 14 +++++++------- content/en/experiments/reading_results.md | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/config/_default/menus/main.en.yaml b/config/_default/menus/main.en.yaml index 8917ffc4713..9501d9d3b46 100644 --- a/config/_default/menus/main.en.yaml +++ b/config/_default/menus/main.en.yaml @@ -8789,7 +8789,7 @@ menu: parent: rum weight: 13 - name: Product Analytics - url: product_analytics + url: product_analytics/ pre: product-analytics identifier: product_analytics parent: digital_experience_heading @@ -8840,7 +8840,7 @@ menu: identifier: pa_profiles weight: 4 - name: Experiments - url: experiments + url: experiments/ pre: experiment-wui parent: digital_experience_heading identifier: pa_experiments diff --git a/content/en/experiments/_index.md b/content/en/experiments/_index.md index d945e6cc64f..72e66a5fe4f 100644 --- a/content/en/experiments/_index.md +++ b/content/en/experiments/_index.md @@ -1,6 +1,6 @@ --- title: Planning and Launching Experiments -description: Use Datadog Experiments to measure the causal relationship new experiences or features have on user outcomes. +description: Use Datadog Experiments to measure the causal relationship that new experiences or features have on user outcomes. aliases: - /product_analytics/experimentation/ further_reading: @@ -30,13 +30,13 @@ To create, configure, and launch your experiment, complete the following steps: 2. Click **+ Create Experiment**. 3. Enter your experiment name and hypothesis. -{{< img src="/product_analytics/experiment/exp_create_experiment.png" alt="create an experiment and add a hypothesis for the experiment." 
style="width:80%;" >}} +{{< img src="/product_analytics/experiment/exp_create_experiment.png" alt="The experiment creation form with fields for experiment name and hypothesis." style="width:80%;" >}} ### Step 2 - Add metrics After you’ve created an experiment, add your primary metric and optional guardrails. See [Defining Metrics][2] for details on how to create metrics. -{{< img src="/product_analytics/experiment/exp_decision_metrics1.png" alt="create an experiment and add a hypothesis for the experiment." style="width:80%;" >}} +{{< img src="/product_analytics/experiment/exp_decision_metrics1.png" alt="The metrics configuration panel with options for primary metric and guardrails." style="width:80%;" >}} #### Add a sample size calculation (optional) @@ -47,22 +47,22 @@ After selecting your experiment’s metrics, use the optional sample size calcul 1. Click **Run calculation** to see the [Minimum Detectable Effects][3] (MDE) your experiment has on your metrics. The MDE is the smallest difference that you are able to detect between your experiment’s variants. -{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="Sleect an entrypoint event to run a sample size calculation" style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="Select an entrypoint event to run a sample size calculation" style="width:90%;" >}} ### Step 3 - Launch your experiment After specifying your metrics, you can launch your experiment. -1. Select a Feature Flag that captures the variants you want to test. If you have not yet created a feature flag, see the [Getting Started with Feature Flags][4] page. +1. Select a Feature flag that captures the variants you want to test. If you have not yet created a feature flag, see the [Getting Started with Feature Flags][4] page. -1. Click **Set up experiment on feature flag** to specify how you want to roll out your experiment. 
You can either launch the experiment to all traffic, or schedule a gradual rollout. +1. Click **Set Up Experiment on Feature Flag** to specify how you want to roll out your experiment. You can either launch the experiment to all traffic, or schedule a gradual rollout. {{< img src="/product_analytics/experiment/exp_feature_flag.png" alt="Set up an experiment on a Feature Flag." style="width:90%;" >}} ## Next steps 1. **[Defining metrics][2]**: Define the metrics you want to measure during your experiments. 1. **[Reading Experiment Results][5]**: Review and explore your experiment results. -1. Learn more about **[Minimum Detectable Effects][3]**: Choose an appropriately sized MDE. +1. **[Minimum Detectable Effects][3]**: Choose an appropriately sized MDE. ## Further reading diff --git a/content/en/experiments/reading_results.md b/content/en/experiments/reading_results.md index c01753177fd..5be60f95943 100644 --- a/content/en/experiments/reading_results.md +++ b/content/en/experiments/reading_results.md @@ -45,4 +45,4 @@ Subject level properties are based on attributes at the initial time of exposure ## Further reading {{< partial name="whats-next/whats-next.html" >}} -[1]: /experiments/ \ No newline at end of file +[1]: /experiments/ From 6683d562899779f152304d4a046324dbbd65db64 Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Mon, 16 Mar 2026 15:30:56 -0400 Subject: [PATCH 05/11] ran the /review skill and made edits --- content/en/experiments/_index.md | 2 +- content/en/experiments/minimum_detectable_effect.md | 5 ----- content/en/experiments/reading_results.md | 2 +- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/content/en/experiments/_index.md b/content/en/experiments/_index.md index 72e66a5fe4f..fd5dd75ebdf 100644 --- a/content/en/experiments/_index.md +++ b/content/en/experiments/_index.md @@ -47,7 +47,7 @@ After selecting your experiment’s metrics, use the optional sample size calcul 1. 
Click **Run calculation** to see the [Minimum Detectable Effects][3] (MDE) your experiment has on your metrics. The MDE is the smallest difference that you are able to detect between your experiment’s variants. -{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="Select an entrypoint event to run a sample size calculation" style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="Select an entry point event to run a sample size calculation" style="width:90%;" >}} ### Step 3 - Launch your experiment diff --git a/content/en/experiments/minimum_detectable_effect.md b/content/en/experiments/minimum_detectable_effect.md index 7ae352e70d1..f7504c792e8 100644 --- a/content/en/experiments/minimum_detectable_effect.md +++ b/content/en/experiments/minimum_detectable_effect.md @@ -42,10 +42,5 @@ If the MDE is too small, the experiment may require excessive traffic or run tim A common way to choose an MDE is to examine results from past experiments. For example, if historical experiments in a particular domain typically yield effects of 5–10%, selecting an MDE near the lower end of that range (such as 5%) can be a reasonable starting point. - - - - - ## Further reading {{< partial name="whats-next/whats-next.html" >}} diff --git a/content/en/experiments/reading_results.md b/content/en/experiments/reading_results.md index 5be60f95943..80679409c9f 100644 --- a/content/en/experiments/reading_results.md +++ b/content/en/experiments/reading_results.md @@ -16,7 +16,7 @@ further_reading: After [launching your experiment][1], Datadog begins calculating results for your selected metrics. You can add additional metrics, organize metrics into groups, and explore related user sessions to understand the impact of each variant. -{{< img src="/product_analytics/experiment/exp_reading_exps_overview.png" alt="A view of the metrics and their variations in the control and experiment groups ." 
style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_reading_exps_overview.png" alt="A view of the metrics and their variations in the control and experiment groups." style="width:90%;" >}} ## Confidence intervals For each metric, Datadog shows the average per-subject value (typically per user) for both the control and treatment variants. It also reports the relative lift and the associated confidence interval. From ea4b4da236ef4ab31bf52b0f5709e84fbc1e4afe Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Mon, 16 Mar 2026 15:50:12 -0400 Subject: [PATCH 06/11] quick fixes on feature flag capitalization --- content/en/experiments/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/en/experiments/_index.md b/content/en/experiments/_index.md index fd5dd75ebdf..67da29ba3fa 100644 --- a/content/en/experiments/_index.md +++ b/content/en/experiments/_index.md @@ -17,7 +17,7 @@ Datadog Experiments is in Preview. Complete the form to request access. {{< /callout >}} ## Overview -Datadog Experiments allows you to measure the causal relationship that new experiences and features have on user outcomes. Experiments uses [Feature flags][4] to randomly allocate traffic between two or more variations, using one of the variations as a control group. +Datadog Experiments allows you to measure the causal relationship that new experiences and features have on user outcomes. Experiments uses [Feature Flags][4] to randomly allocate traffic between two or more variations, using one of the variations as a control group. This page walks you through planning and launching your experiments. @@ -53,7 +53,7 @@ After selecting your experiment’s metrics, use the optional sample size calcul After specifying your metrics, you can launch your experiment. -1. Select a Feature flag that captures the variants you want to test. If you have not yet created a feature flag, see the [Getting Started with Feature Flags][4] page. +1. 
Select a Feature Flag that captures the variants you want to test. If you have not yet created a feature flag, see the [Getting Started with Feature Flags][4] page. 1. Click **Set Up Experiment on Feature Flag** to specify how you want to roll out your experiment. You can either launch the experiment to all traffic, or schedule a gradual rollout. From ffa727e80f35ad4cb7967036f20a884986a844af Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Wed, 18 Mar 2026 11:01:15 -0400 Subject: [PATCH 07/11] changes Experiments to Datadog Experiments to avoid the verd disagreement --- content/en/experiments/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/experiments/_index.md b/content/en/experiments/_index.md index 67da29ba3fa..c3a813bfc13 100644 --- a/content/en/experiments/_index.md +++ b/content/en/experiments/_index.md @@ -17,7 +17,7 @@ Datadog Experiments is in Preview. Complete the form to request access. {{< /callout >}} ## Overview -Datadog Experiments allows you to measure the causal relationship that new experiences and features have on user outcomes. Experiments uses [Feature Flags][4] to randomly allocate traffic between two or more variations, using one of the variations as a control group. +Datadog Experiments allows you to measure the causal relationship that new experiences and features have on user outcomes. Datadog Experiments uses [Feature Flags][4] to randomly allocate traffic between two or more variations, using one of the variations as a control group. This page walks you through planning and launching your experiments. 
From 096be80b3ffccfd63d39c41c90cb1a43504e0118 Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Wed, 18 Mar 2026 11:12:29 -0400 Subject: [PATCH 08/11] quick alt text change --- content/en/experiments/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/experiments/_index.md b/content/en/experiments/_index.md index c3a813bfc13..1de93adb95c 100644 --- a/content/en/experiments/_index.md +++ b/content/en/experiments/_index.md @@ -47,7 +47,7 @@ After selecting your experiment’s metrics, use the optional sample size calcul 1. Click **Run calculation** to see the [Minimum Detectable Effects][3] (MDE) your experiment has on your metrics. The MDE is the smallest difference that you are able to detect between your experiment’s variants. -{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="Select an entry point event to run a sample size calculation" style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="The sample size calculation panel showing entry point selection and minimum detectable effects for each metric." style="width:90%;" >}} ### Step 3 - Launch your experiment From 901454fbf769135c6e16ea4dd3b99708a6f15df7 Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Wed, 18 Mar 2026 11:16:34 -0400 Subject: [PATCH 09/11] changes to the Entrypoint writing --- content/en/experiments/_index.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/en/experiments/_index.md b/content/en/experiments/_index.md index 1de93adb95c..c59e8ab0de9 100644 --- a/content/en/experiments/_index.md +++ b/content/en/experiments/_index.md @@ -42,12 +42,12 @@ After you’ve created an experiment, add your primary metric and optional guard After selecting your experiment’s metrics, use the optional sample size calculator to determine how small of a change your experiment can reliably detect with your current sample size. -1. Select the entry point of your experiment. 
This specifies _when_ in the user journey they will be enrolled into the test. +1. Select the entrypoint of your experiment. This specifies _when_ in the user journey they will be enrolled into the test. For example, if you plan to run an experiment on users who visit the homepage, select the homepage view as your entry point. 1. Click **Run calculation** to see the [Minimum Detectable Effects][3] (MDE) your experiment has on your metrics. The MDE is the smallest difference that you are able to detect between your experiment’s variants. -{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="The sample size calculation panel showing entry point selection and minimum detectable effects for each metric." style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_sample_size.png" alt="The Sample Size Calculator modal with the Entrypoint Event dropdown highlighted." style="width:90%;" >}} ### Step 3 - Launch your experiment From d51efd44ca8cfb0f9284f39074759ed73d7143fa Mon Sep 17 00:00:00 2001 From: Ida Adjivon Date: Wed, 18 Mar 2026 11:29:18 -0400 Subject: [PATCH 10/11] wuick alt text change --- content/en/experiments/reading_results.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/content/en/experiments/reading_results.md b/content/en/experiments/reading_results.md index 80679409c9f..233a4b9fd9e 100644 --- a/content/en/experiments/reading_results.md +++ b/content/en/experiments/reading_results.md @@ -16,7 +16,7 @@ further_reading: After [launching your experiment][1], Datadog begins calculating results for your selected metrics. You can add additional metrics, organize metrics into groups, and explore related user sessions to understand the impact of each variant. -{{< img src="/product_analytics/experiment/exp_reading_exps_overview.png" alt="A view of the metrics and their variations in the control and experiment groups." 
style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_reading_exps_overview.png" alt="The experiment results overview showing a decision metrics table with control and treatment values, relative lift, and confidence interval bars for three metrics." style="width:90%;" >}} ## Confidence intervals For each metric, Datadog shows the average per-subject value (typically per user) for both the control and treatment variants. It also reports the relative lift and the associated confidence interval. @@ -39,7 +39,7 @@ To dive deeper into experiment results, hover over a metric and click **Chart**. ### Segment-level results Subject level properties are based on attributes at the initial time of exposure (for example, region, new visitor vs repeat visitor). This is useful for understanding when certain cohorts of users reacted differently to the new experience. -{{< img src="/product_analytics/experiment/exp_segment_view.png" alt="Segment-level view of metric 'click on ADD TO CART' split by four country ISO code." style="width:90%;" >}} +{{< img src="/product_analytics/experiment/exp_segment_view.png" alt="Segment-level view of a metric split by Country ISO Code, showing a bar chart of relative lift and a data table with control and treatment values per country." 
style="width:90%;" >}} ## Further reading {{< partial name="whats-next/whats-next.html" >}} From 49c78c71594f1717e39e2b4f00c8f45052e30e0e Mon Sep 17 00:00:00 2001 From: Ida Adjivon <65119712+iadjivon@users.noreply.github.com> Date: Wed, 18 Mar 2026 11:38:33 -0400 Subject: [PATCH 11/11] Apply suggestion from @iadjivon --- content/en/experiments/reading_results.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/experiments/reading_results.md b/content/en/experiments/reading_results.md index 233a4b9fd9e..a4a9c3d74a6 100644 --- a/content/en/experiments/reading_results.md +++ b/content/en/experiments/reading_results.md @@ -37,7 +37,7 @@ If the entire confidence interval is above zero, then the result is statisticall To dive deeper into experiment results, hover over a metric and click **Chart**. This gives you the option to compare the experiment’s impact across different user segments. ### Segment-level results -Subject level properties are based on attributes at the initial time of exposure (for example, region, new visitor vs repeat visitor). This is useful for understanding when certain cohorts of users reacted differently to the new experience. +Subject-level properties are based on attributes at the initial time of exposure (for example, a user's region, whether they are a new visitor or a repeat visitor). This is useful for understanding whether certain cohorts of users reacted differently to the new experience. {{< img src="/product_analytics/experiment/exp_segment_view.png" alt="Segment-level view of a metric split by Country ISO Code, showing a bar chart of relative lift and a data table with control and treatment values per country." style="width:90%;" >}}