From d629fee9dffd6a4899b0bc4b9332dc0e326f5977 Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Tue, 17 Mar 2026 17:04:09 -0500 Subject: [PATCH 1/8] WIP port of R and Python vignettes to multilingual quarto vignettes --- .gitignore | 4 + vignettes/.gitignore | 4 + vignettes/Python/IV/iv.html | 8882 ------------------------------- vignettes/Python/RDD/rdd.html | 8089 ---------------------------- vignettes/_quarto.yml | 49 + vignettes/bart.qmd | 210 + vignettes/bcf.qmd | 129 + vignettes/custom-sampling.qmd | 133 + vignettes/ensemble-kernel.qmd | 62 + vignettes/heteroskedastic.qmd | 107 + vignettes/index.qmd | 42 + vignettes/iv.qmd | 938 ++++ vignettes/multi-chain.qmd | 125 + vignettes/multivariate-bcf.qmd | 53 + vignettes/ordinal-outcome.qmd | 53 + vignettes/prior-calibration.qmd | 62 + vignettes/rdd.qmd | 472 ++ vignettes/serialization.qmd | 114 + vignettes/sklearn.qmd | 64 + vignettes/summary-plotting.qmd | 56 + vignettes/tree-inspection.qmd | 114 + vignettes/vignettes.bib | 127 + 22 files changed, 2918 insertions(+), 16971 deletions(-) create mode 100644 vignettes/.gitignore delete mode 100644 vignettes/Python/IV/iv.html delete mode 100644 vignettes/Python/RDD/rdd.html create mode 100644 vignettes/_quarto.yml create mode 100644 vignettes/bart.qmd create mode 100644 vignettes/bcf.qmd create mode 100644 vignettes/custom-sampling.qmd create mode 100644 vignettes/ensemble-kernel.qmd create mode 100644 vignettes/heteroskedastic.qmd create mode 100644 vignettes/index.qmd create mode 100644 vignettes/iv.qmd create mode 100644 vignettes/multi-chain.qmd create mode 100644 vignettes/multivariate-bcf.qmd create mode 100644 vignettes/ordinal-outcome.qmd create mode 100644 vignettes/prior-calibration.qmd create mode 100644 vignettes/rdd.qmd create mode 100644 vignettes/serialization.qmd create mode 100644 vignettes/sklearn.qmd create mode 100644 vignettes/summary-plotting.qmd create mode 100644 vignettes/tree-inspection.qmd create mode 100644 vignettes/vignettes.bib diff --git 
a/.gitignore b/.gitignore index 03c3bc709..0c5a93ab1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,9 @@ # Editor / IDE settings *.vscode +*.claude + +# Other +dev/quartodoc-spike/ # Dependencies /node_modules diff --git a/vignettes/.gitignore b/vignettes/.gitignore new file mode 100644 index 000000000..6041614a6 --- /dev/null +++ b/vignettes/.gitignore @@ -0,0 +1,4 @@ +/.quarto/ +**/*.quarto_ipynb +_freeze/ +_site/ \ No newline at end of file diff --git a/vignettes/Python/IV/iv.html b/vignettes/Python/IV/iv.html deleted file mode 100644 index c99a5e793..000000000 --- a/vignettes/Python/IV/iv.html +++ /dev/null @@ -1,8882 +0,0 @@ - - - - - -iv - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - diff --git a/vignettes/Python/RDD/rdd.html b/vignettes/Python/RDD/rdd.html deleted file mode 100644 index 09923ab16..000000000 --- a/vignettes/Python/RDD/rdd.html +++ /dev/null @@ -1,8089 +0,0 @@ - - - - - -rdd - - - - - - - - - - - - -
- - - - - - - - - - - - - - - - - - - - - -
- - diff --git a/vignettes/_quarto.yml b/vignettes/_quarto.yml new file mode 100644 index 000000000..2431a0cdb --- /dev/null +++ b/vignettes/_quarto.yml @@ -0,0 +1,49 @@ +project: + type: website + output-dir: _site + +website: + title: "StochTree Vignettes" + navbar: + left: + - href: index.qmd + text: Home + sidebar: + - title: "Vignettes" + contents: + - index.qmd + - section: "Core Models" + contents: + - bart.qmd + - bcf.qmd + - heteroskedastic.qmd + - multi-chain.qmd + - ordinal-outcome.qmd + - multivariate-bcf.qmd + - section: "Practical Topics" + contents: + - serialization.qmd + - tree-inspection.qmd + - summary-plotting.qmd + - prior-calibration.qmd + - sklearn.qmd + - section: "Low-Level Interface" + contents: + - custom-sampling.qmd + - ensemble-kernel.qmd + - section: "Advanced Methods" + contents: + - rdd.qmd + - iv.qmd + +format: + html: + theme: cosmo + toc: true + toc-depth: 3 + grid: + body-width: 960px + margin-width: 200px + +execute: + freeze: auto diff --git a/vignettes/bart.qmd b/vignettes/bart.qmd new file mode 100644 index 000000000..8f1ea9277 --- /dev/null +++ b/vignettes/bart.qmd @@ -0,0 +1,210 @@ +--- +title: "Bayesian Supervised Learning in StochTree" +bibliography: vignettes.bib +--- + +This vignette demonstrates how to sample variants of the BART model (@chipman2010bart), using the `bart()` function in `stochtree`. The original BART model is + +$$ +\begin{aligned} +y_i \mid X_i = x_i &\sim \mathcal{N}(f(x_i), \sigma^2)\\ +\sigma^2 &\sim \text{IG}\left(\frac{\nu}{2}, \frac{\nu \lambda}{2}\right) +\end{aligned} +$$ + +where + +$$ +f(X) = \sum_{s=1}^m g_s(X) +$$ + +and each $g_s$ refers to a decision tree function which partitions $X$ into $k_s$ mutually exclusive regions ($\mathcal{A}_s = \mathcal{A}_{s,1} \cup \dots \cup \mathcal{A}_{s,k_s}$) and assigns a scalar parameter $\mu_{s,j}$ to each region $\mathcal{A}_{s,j}$ + +$$ +g_s(x) = \sum_{j = 1}^{k_s} \mu_{s,j} \mathbb{I}\left(x \in \mathcal{A}_{s,j}\right). 
+$$ + +The partitions $\mathcal{A}_s$ are defined by a series of logical split rules $X_i \leq c$ where $i$ is a variable index and $c$ is a numeric cutpoint and these partitions are guided by a uniform prior on variables and cutpoints. The prior on partitions is further specified by a probability of splitting a node + +$$ +P(\text{split node } \eta) = \alpha (1 + \text{depth}_{\eta})^{-\beta} +$$ + +The prior for each leaf node parameter is + +$$ +\mu_{s,j} \sim \mathcal{N}\left(0, \sigma^2_{\mu}\right) +$$ + +Together, we refer to this conditional mean model as + +$$ +f(X) \sim \text{BART}(\alpha, \beta, m) +$$ + +This is the "core" of stochtree's supervised learning interface, though we support many expanded models including + +* linear leaf regression (i.e. each leaf node evaluates a linear regression on basis $W$ rather than return a constant), +* additive random effects, +* forest-based heteroskedasticity, +* binary / ordinal outcome modeling using the probit and complementary log-log (cloglog) links, + +and we offer the ability to sample any of the above models using the MCMC or the Grow-From-Root sampler (@he2023stochastic). + +# Setup + +To begin, we load the `stochtree` and other necessary packages. + +::::{.panel-tabset group="language"} + +## R + +```{r} +library(stochtree) +``` + +## Python + +```{python} +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from sklearn.model_selection import train_test_split + +from stochtree import BARTModel +``` + +:::: + +We set a seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed <- 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +:::: + +# Demo 1: Step Function + +## Data Generation + +We generate data from a simple step function and split into test and train sets. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +# Generate the data +n <- 500 +p_x <- 10 +snr <- 3 +X <- matrix(runif(n * p_x), ncol = p_x) +f_XW <- (((0 <= X[, 1]) & (0.25 > X[, 1])) * + (-7.5) + + ((0.25 <= X[, 1]) & (0.5 > X[, 1])) * (-2.5) + + ((0.5 <= X[, 1]) & (0.75 > X[, 1])) * (2.5) + + ((0.75 <= X[, 1]) & (1 > X[, 1])) * (7.5)) +noise_sd <- sd(f_XW) / snr +y <- f_XW + rnorm(n, 0, 1) * noise_sd + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct * n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds, ]) +X_train <- as.data.frame(X[train_inds, ]) +y_test <- y[test_inds] +y_train <- y[train_inds] +``` + +## Python + +```{python} +# Generate the data +n = 500 +p_x = 10 +snr = 3 +X = rng.uniform(0, 1, (n, p_x)) +f_XW = ( + ((X[:, 0] >= 0.0) & (X[:, 0] < 0.25)) * (-7.5) + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (-2.5) + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * (2.5) + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * (7.5) +) +noise_sd = np.std(f_XW) / snr +y = f_XW + rng.normal(0, noise_sd, n) + +# Split data into test and train sets +test_set_pct = 0.2 +X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_set_pct, random_state=random_seed +) +``` + +:::: + +## Sampling and Analysis + +We sample from a BART model of $y \mid X$ with 10 grow-from-root samples (@he2023stochastic) followed by 100 MCMC samples (this is the default in `stochtree`). We also specify $m = 100$ and we let both $\sigma^2$ and $\sigma^2_{\mu}$ be updated by Gibbs samplers. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = T) +mean_forest_params <- list(sample_sigma2_leaf = T, num_trees = 100) +bart_model <- stochtree::bart( + X_train = X_train, + y_train = y_train, + X_test = X_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + mean_forest_params = mean_forest_params +) +``` + +## Python + +```{python} +num_gfr = 10 +num_burnin = 0 +num_mcmc = 100 +general_params = {'sample_sigma2_global': True} +mean_forest_params = {'sample_sigma2_leaf': True, 'num_trees': 100} +bart_model = BARTModel() +bart_model.sample( + X_train = X_train, + y_train = y_train, + X_test = X_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + mean_forest_params = mean_forest_params +) +``` + +:::: + +# References diff --git a/vignettes/bcf.qmd b/vignettes/bcf.qmd new file mode 100644 index 000000000..ba523733f --- /dev/null +++ b/vignettes/bcf.qmd @@ -0,0 +1,129 @@ +--- +title: "BCF: Bayesian Causal Forests" +--- + +Bayesian Causal Forests (BCF) for estimating heterogeneous treatment effects, +with separate tree ensembles for the prognostic function and treatment effect. + +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/causal_inference.ipynb` +- **R**: pkgdown article `CausalInference` +::: + +## Background + +BCF [@hahn2020bayesian] models the conditional average treatment effect (CATE) +by fitting two separate tree ensembles: + +$$ +Y_i = \mu(X_i) + \tau(X_i) Z_i + \epsilon_i, \quad \epsilon_i \sim \mathcal{N}(0, \sigma^2) +$$ + +where $\mu(\cdot)$ is a prognostic forest and $\tau(\cdot)$ is a treatment effect forest. +The estimated propensity score $\hat{\pi}(X_i)$ is included as a covariate in $\mu(\cdot)$ +to reduce confounding bias. 
+ +## Setup + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +library(stochtree) +``` + +## Python + +```{python} +#| eval: false +import stochtree +import numpy as np +``` + +:::: + +## Data Simulation + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Simulate observational data with heterogeneous treatment effects +``` + +## Python + +```{python} +#| eval: false +# Simulate observational data with heterogeneous treatment effects +``` + +:::: + +## Propensity Score Estimation + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Estimate propensity scores +``` + +## Python + +```{python} +#| eval: false +# Estimate propensity scores +``` + +:::: + +## Model Fitting + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Fit BCF model +``` + +## Python + +```{python} +#| eval: false +# Fit BCF model +``` + +:::: + +## Posterior Summaries + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Summarize CATE posterior +``` + +## Python + +```{python} +#| eval: false +# Summarize CATE posterior +``` + +:::: + +## References diff --git a/vignettes/custom-sampling.qmd b/vignettes/custom-sampling.qmd new file mode 100644 index 000000000..f518386f7 --- /dev/null +++ b/vignettes/custom-sampling.qmd @@ -0,0 +1,133 @@ +--- +title: "Custom Sampling Routine" +--- + +Building a custom Gibbs sampler using the low-level `stochtree` primitives — +`Dataset`, `Residual`, `ForestSampler`, `GlobalVarianceModel`, and +`LeafVarianceModel` — to implement non-standard models and sampling schemes. + +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/prototype_interface.ipynb` +- **R**: pkgdown article `CustomSamplingRoutine` +::: + +## Background + +The high-level `BARTModel` and `BCFModel` classes handle data preparation, +prior calibration, and sampling internally. 
The low-level interface exposes +these building blocks so you can: + +- Customize which parameters are sampled and in what order +- Insert additional Gibbs steps (e.g. for auxiliary variables) +- Implement novel models that share `stochtree`'s tree ensemble samplers + +## Setup + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +library(stochtree) +``` + +## Python + +```{python} +#| eval: false +from stochtree import ( + Dataset, + Residual, + ForestSampler, + GlobalVarianceModel, + LeafVarianceModel, + Forest, + ForestContainer, +) +import numpy as np +``` + +:::: + +## Preparing Data Structures + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Construct Dataset and Residual objects +``` + +## Python + +```{python} +#| eval: false +# Construct Dataset and Residual objects +``` + +:::: + +## Initializing Forests + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Initialize Forest and ForestContainer +``` + +## Python + +```{python} +#| eval: false +# Initialize Forest and ForestContainer +``` + +:::: + +## Custom Gibbs Loop + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Implement a full Gibbs sampler loop +``` + +## Python + +```{python} +#| eval: false +# Implement a full Gibbs sampler loop +``` + +:::: + +## Extracting Results + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Extract posterior samples from ForestContainer +``` + +## Python + +```{python} +#| eval: false +# Extract posterior samples from ForestContainer +``` + +:::: diff --git a/vignettes/ensemble-kernel.qmd b/vignettes/ensemble-kernel.qmd new file mode 100644 index 000000000..10491a937 --- /dev/null +++ b/vignettes/ensemble-kernel.qmd @@ -0,0 +1,62 @@ +--- +title: "Ensemble Kernel" +--- + +Using the `stochtree` tree ensemble as a kernel function — extracting the +posterior similarity matrix induced by leaf co-membership across trees and +MCMC samples. 
+ +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **R**: pkgdown article `EnsembleKernel` +::: + +## Background + +Each tree in a BART ensemble partitions the covariate space into leaf regions. +Two observations $i$ and $j$ that fall in the same leaf of tree $t$ at sample $s$ +are "similar" according to that tree. Averaging over trees and posterior samples +gives a data-driven kernel: + +$$ +K(x_i, x_j) = \frac{1}{Sm} \sum_{s=1}^S \sum_{t=1}^m \mathbf{1}[\ell_t^{(s)}(x_i) = \ell_t^{(s)}(x_j)] +$$ + +This kernel can be used for clustering, Gaussian process inference, or as a +similarity measure for downstream tasks. + +## Setup + +```{r} +#| eval: false +library(stochtree) +``` + +## Fitting a Model + +```{r} +#| eval: false +# Fit a BART model +``` + +## Extracting the Kernel Matrix + +```{r} +#| eval: false +# Compute leaf co-membership kernel +``` + +## Visualizing the Kernel + +```{r} +#| eval: false +# Heatmap of kernel matrix +``` + +## Downstream Applications + +```{r} +#| eval: false +# Clustering or GP prediction with the ensemble kernel +``` diff --git a/vignettes/heteroskedastic.qmd b/vignettes/heteroskedastic.qmd new file mode 100644 index 000000000..e1f48d9b0 --- /dev/null +++ b/vignettes/heteroskedastic.qmd @@ -0,0 +1,107 @@ +--- +title: "Heteroskedastic BART" +--- + +BART with a forest-modeled error variance, where the residual variance +$\sigma^2(X_i)$ is itself a function of covariates learned via a separate +tree ensemble. + +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/heteroskedastic_supervised_learning.ipynb` +- **R**: pkgdown article `Heteroskedasticity` +::: + +## Background + +Standard BART assumes homoskedastic errors $\epsilon_i \sim \mathcal{N}(0, \sigma^2)$. +When error variance varies with covariates, modeling $\sigma^2(X_i)$ jointly with +the mean function improves inference. 
The model is: + +$$ +Y_i = \mu(X_i) + \epsilon_i, \quad \epsilon_i \sim \mathcal{N}(0, \sigma^2(X_i)) +$$ + +where both $\mu(\cdot)$ and $\log \sigma^2(\cdot)$ are modeled with BART ensembles. + +## Setup + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +library(stochtree) +``` + +## Python + +```{python} +#| eval: false +import stochtree +import numpy as np +``` + +:::: + +## Data Simulation + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Simulate data with non-constant variance +``` + +## Python + +```{python} +#| eval: false +# Simulate data with non-constant variance +``` + +:::: + +## Model Fitting + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Fit heteroskedastic BART +``` + +## Python + +```{python} +#| eval: false +# Fit heteroskedastic BART +``` + +:::: + +## Posterior Summaries + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Mean and variance posterior summaries +``` + +## Python + +```{python} +#| eval: false +# Mean and variance posterior summaries +``` + +:::: diff --git a/vignettes/index.qmd b/vignettes/index.qmd new file mode 100644 index 000000000..3a9d83eb1 --- /dev/null +++ b/vignettes/index.qmd @@ -0,0 +1,42 @@ +--- +title: "StochTree Vignettes" +--- + +Extended worked examples for the `stochtree` package, covering core models, +practical topics, and advanced causal inference methods. Each vignette presents +R and Python implementations side-by-side. 
+ +## Core Models + +| Vignette | Description | +|---|---| +| [BART](bart.qmd) | Bayesian Additive Regression Trees for supervised learning | +| [BCF](bcf.qmd) | Bayesian Causal Forests for heterogeneous treatment effects | +| [Heteroskedastic BART](heteroskedastic.qmd) | BART with a forest-modeled error variance | +| [Multi-Chain Inference](multi-chain.qmd) | Running and combining multiple MCMC chains | +| [Ordinal Outcome Modeling](ordinal-outcome.qmd) | BART for ordinal responses | +| [Multivariate Treatment BCF](multivariate-bcf.qmd) | BCF with vector-valued treatments | + +## Practical Topics + +| Vignette | Description | +|---|---| +| [Model Serialization](serialization.qmd) | Saving and loading fitted models | +| [Tree Inspection](tree-inspection.qmd) | Examining individual trees in a fitted ensemble | +| [Summary and Plotting](summary-plotting.qmd) | Posterior summary utilities | +| [Prior Calibration](prior-calibration.qmd) | Choosing and calibrating tree priors | +| [Scikit-Learn Interface](sklearn.qmd) | Using stochtree via sklearn-compatible estimators | + +## Low-Level Interface + +| Vignette | Description | +|---|---| +| [Custom Sampling Routine](custom-sampling.qmd) | Building a custom Gibbs sampler with stochtree primitives | +| [Ensemble Kernel](ensemble-kernel.qmd) | Using the tree ensemble as a kernel | + +## Advanced Methods + +| Vignette | Description | +|---|---| +| [Regression Discontinuity Design](rdd.qmd) | BARDDT: leaf-regression BART for RDD | +| [Instrumental Variables](iv.qmd) | IV analysis via a custom monotone probit Gibbs sampler | diff --git a/vignettes/iv.qmd b/vignettes/iv.qmd new file mode 100644 index 000000000..3342d432e --- /dev/null +++ b/vignettes/iv.qmd @@ -0,0 +1,938 @@ +--- +title: "Instrumental Variables (IV) with stochtree" +author: + - name: P. 
Richard Hahn + affiliation: Arizona State University + - name: Drew Herren + affiliation: University of Texas at Austin +date: today +bibliography: R/IV/iv.bib +--- + +## Introduction + +Here we consider a causal inference problem with a binary treatment and a binary outcome +where there is unobserved confounding, but an exogenous instrument is available (also +binary). This problem requires several extensions to the basic BART model, all of which +can be implemented as Gibbs samplers using `stochtree`. Our analysis follows the +Bayesian nonparametric approach described in the supplement to @hahn2016bayesian. + +## Background + +To be concrete, suppose we wish to measure the effect of receiving a flu vaccine on the +probability of getting the flu. Individuals who opt to get a flu shot differ in many +ways from those that don't, and these lifestyle differences presumably also affect their +respective chances of getting the flu. However, a randomized encouragement design — +where some individuals are selected at random to receive extra incentive to get a flu +shot — allows us to tease apart the impact of the vaccine from the confounding factors. +This exact problem has been studied in @mcdonald1992effects, with follow-on analyses by +@hirano2000assessing, @richardson2011transparent, and @imbens2015causal. + +## Notation + +Let $V$ denote the treatment variable (vaccine). Let $Y$ denote the response +(getting the flu), $Z$ the instrument (encouragement), and $X$ an additional observable +covariate (patient age). + +Let $S$ denote the *principal strata*, an exhaustive characterization of how individuals +are affected by the encouragement. Some people will get a flu shot no matter what: +*always takers* ($a$). Some will not get the shot no matter what: *never takers* ($n$). +*Compliers* ($c$) would not have gotten the shot but for the encouragement. We assume +no *defiers* ($d$). 
+ +## The Causal Diagram + +![The causal directed acyclic graph (CDAG) for the IV flu example. The dashed red arrow represents a potential direct effect of $Z$ on $Y$, whose absence is the exclusion restriction.](R/IV/IV_CDAG.png){width=50% fig-align="center"} + +The biggest question about this graph concerns the dashed red arrow from the putative +instrument $Z$ to the outcome. If that arrow is present, $Z$ is not a valid instrument. +The assumption that there is no such arrow is the *exclusion restriction*. We will +explore what inferences are possible when we remain agnostic about its presence. + +## Potential Outcomes + +There are six distinct random variables: $V(0)$, $V(1)$, $Y(0,0)$, $Y(1,0)$, $Y(0,1)$, +and $Y(1,1)$. The fundamental problem of causal inference is that some of these are +never simultaneously observed: + +| $i$ | $Z_i$ | $V_i(0)$ | $V_i(1)$ | $Y_i(0,0)$ | $Y_i(1,0)$ | $Y_i(0,1)$ | $Y_i(1,1)$ | +|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| +| 1 | 1 | ? | 1 | ? | ? | ? | 0 | +| 2 | 0 | 1 | ? | ? | 1 | ? | ? | +| 3 | 0 | 0 | ? | 1 | ? | ? | ? | +| 4 | 1 | ? | 0 | ? | ? | 0 | ? | +| $\vdots$ | $\vdots$ | $\vdots$ | $\vdots$ | $\vdots$ | $\vdots$ | $\vdots$ | $\vdots$ | + +The principal strata are defined by which potential treatment $V(z)$ is observed: + +| $V_i(0)$ | $V_i(1)$ | $S_i$ | +|:---:|:---:|:---:| +| 0 | 0 | Never Taker ($n$) | +| 1 | 1 | Always Taker ($a$) | +| 0 | 1 | Complier ($c$) | +| 1 | 0 | Defier ($d$) | + +## Estimands and Identification + +Let $\pi_s(x) = \Pr(S=s \mid X=x)$ and +$\gamma_s^{vz}(x) = \Pr(Y(v,z)=1 \mid S=s, X=x)$. +The complier conditional average treatment effect +$\gamma_c^{1,z}(x) - \gamma_c^{0,z}(x)$ is our ultimate goal. 
+ +Under the monotonicity assumption ($\pi_d(x) = 0$), the observed data imply: + +$$ +\begin{aligned} +p_{1 \mid 00}(x) &= \frac{\pi_c(x)}{\pi_c(x)+\pi_n(x)} \gamma_c^{00}(x) + + \frac{\pi_n(x)}{\pi_c(x)+\pi_n(x)} \gamma_n^{00}(x) \\ +p_{1 \mid 11}(x) &= \frac{\pi_c(x)}{\pi_c(x)+\pi_a(x)} \gamma_c^{11}(x) + + \frac{\pi_a(x)}{\pi_c(x)+\pi_a(x)} \gamma_a^{11}(x) \\ +p_{1 \mid 01}(x) &= \gamma_n^{01}(x) \\ +p_{1 \mid 10}(x) &= \gamma_a^{10}(x) +\end{aligned} +$$ + +and the strata probabilities satisfy: + +$$ +\Pr(V=1 \mid Z=0, X=x) = \pi_a(x), \qquad +\Pr(V=1 \mid Z=1, X=x) = \pi_a(x) + \pi_c(x). +$$ + +Under the exclusion restriction, $\gamma_c^{11}(x)$ and $\gamma_c^{00}(x)$ are +point-identified. Without it, they are partially identified: + +$$ +\max\!\left(0,\, \frac{\pi_c+\pi_n}{\pi_c} p_{1\mid 00} - \frac{\pi_n}{\pi_c}\right) +\leq \gamma_c^{00}(x) \leq +\min\!\left(1,\, \frac{\pi_c+\pi_n}{\pi_c} p_{1\mid 00}\right), +$$ + +and analogously for $\gamma_c^{11}(x)$. + +## Load Libraries + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| include: false +reticulate::use_python( + Sys.getenv("RETICULATE_PYTHON", unset = Sys.which("python3")), + required = TRUE +) +``` + +```{r} +#| message: false +library(stochtree) +``` + +## Python + +```{python} +import numpy as np +import matplotlib.pyplot as plt +from scipy.stats import norm + +from stochtree import ( + RNG, Dataset, Forest, ForestContainer, + ForestSampler, Residual, ForestModelConfig, GlobalModelConfig, +) +``` + +::: + +## Simulate the Data + +### Setup + +:::{.panel-tabset group="language"} + +## R + +```{r} +n <- 20000 +random_seed <- NULL +``` + +## Python + +```{python} +n = 20000 +random_seed = None +if random_seed is not None: + rng = np.random.default_rng(random_seed) +else: + rng = np.random.default_rng() +``` + +::: + +### Generate the Instrument + +:::{.panel-tabset group="language"} + +## R + +```{r} +z <- rbinom(n, 1, 0.5) +``` + +## Python + +```{python} +z = rng.binomial(n=1, p=0.5, 
size=n) +``` + +::: + +### Generate the Covariate + +We think of $X$ as patient age, drawn from a uniform distribution on $[0, 3]$ +(pre-standardized for illustration purposes). + +:::{.panel-tabset group="language"} + +## R + +```{r} +p_X <- 1 +X <- matrix(runif(n * p_X, 0, 3), ncol = p_X) +x <- X[, 1] +``` + +## Python + +```{python} +p_X = 1 +X = rng.uniform(low=0., high=3., size=(n, p_X)) +x = X[:, 0] +``` + +::: + +### Generate the Principal Strata + +We generate $S$ from a logistic model in $X$, parameterized so that the probability +of being a never taker decreases with age. + +:::{.panel-tabset group="language"} + +## R + +```{r} +alpha_a <- 0; beta_a <- 1 +alpha_n <- 1; beta_n <- -1 +alpha_c <- 1; beta_c <- 1 + +pi_s <- function(xval) { + w_a <- exp(alpha_a + beta_a * xval) + w_n <- exp(alpha_n + beta_n * xval) + w_c <- exp(alpha_c + beta_c * xval) + w <- cbind(w_a, w_n, w_c) + w / rowSums(w) +} + +s <- sapply(seq_len(n), function(j) + sample(c("a", "n", "c"), 1, prob = pi_s(X[j, 1]))) +``` + +## Python + +```{python} +alpha_a = 0; beta_a = 1 +alpha_n = 1; beta_n = -1 +alpha_c = 1; beta_c = 1 + +def pi_s(xval, alpha_a, beta_a, alpha_n, beta_n, alpha_c, beta_c): + w = np.column_stack([ + np.exp(alpha_a + beta_a * xval), + np.exp(alpha_n + beta_n * xval), + np.exp(alpha_c + beta_c * xval), + ]) + return w / w.sum(axis=1, keepdims=True) + +strata_probs = pi_s(X[:, 0], alpha_a, beta_a, alpha_n, beta_n, alpha_c, beta_c) +s = np.empty(n, dtype=str) +for i in range(n): + s[i] = rng.choice(['a', 'n', 'c'], p=strata_probs[i, :]) +``` + +::: + +### Generate the Treatment + +The treatment $V$ is a deterministic function of $S$ and $Z$ — this is what gives the +principal strata their meaning. 
+ +:::{.panel-tabset group="language"} + +## R + +```{r} +v <- 1*(s == "a") + 0*(s == "n") + z*(s == "c") + (1-z)*(s == "d") +``` + +## Python + +```{python} +v = 1*(s == 'a') + 0*(s == 'n') + z*(s == "c") + (1-z)*(s == "d") +``` + +::: + +### Generate the Outcome + +The outcome structural model — by varying this function we can alter the +identification conditions. Setting it to depend on `zval` violates the exclusion +restriction, and we do so here to illustrate partial identification. + +:::{.panel-tabset group="language"} + +## R + +```{r} +gamfun <- function(xval, vval, zval, sval) { + baseline <- pnorm(2 - xval - 2.5*(xval - 1.5)^2 - 0.5*zval + + 1*(sval == "n") - 1*(sval == "a")) + baseline - 0.5 * vval * baseline +} +y <- rbinom(n, 1, gamfun(X[, 1], v, z, s)) +``` + +## Python + +```{python} +def gamfun(xval, vval, zval, sval): + baseline = norm.cdf(2 - xval - 2.5*(xval - 1.5)**2 - 0.5*zval + + 1*(sval == "n") - 1*(sval == "a")) + return baseline - 0.5 * vval * baseline + +y = rng.binomial(n=1, p=gamfun(X[:, 0], v, z, s), size=n) +``` + +::: + +### Organize the Data + +For the monotone probit model, the observations must be sorted so that $Z=1$ cases +come first. + +:::{.panel-tabset group="language"} + +## R + +```{r} +Xall <- cbind(X, v, z) +p_X <- p_X + 2 +index <- sort(z, decreasing = TRUE, index.return = TRUE) +X <- matrix(X[index$ix, ], ncol = 1) +Xall <- Xall[index$ix, ] +z <- z[index$ix] +v <- v[index$ix] +s <- s[index$ix] +y <- y[index$ix] +x <- x[index$ix] +``` + +## Python + +```{python} +Xall = np.concatenate((X, np.column_stack((v, z))), axis=1) +p_X = p_X + 2 +sort_index = np.argsort(z)[::-1] +X = X[sort_index, :] +Xall = Xall[sort_index, :] +z = z[sort_index] +v = v[sort_index] +s = s[sort_index] +y = y[sort_index] +x = x[sort_index] +``` + +::: + +## Fit the Outcome Model + +We fit a probit BART model for $\Pr(Y=1 \mid V=1, Z=1, X=x)$ using the +Albert–Chib [@albert1993bayesian] data augmentation Gibbs sampler. 
We initialize the +forest, enter the main loop (alternating: sample forest | sample latent utilities), +and retain all post-warmstart draws. + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| cache: true +num_warmstart <- 10 +num_mcmc <- 1000 +num_samples <- num_warmstart + num_mcmc + +alpha <- 0.95; beta <- 2; min_samples_leaf <- 1; max_depth <- 20 +num_trees <- 50; cutpoint_grid_size <- 100 +tau_init <- 0.5 +leaf_prior_scale <- matrix(tau_init, ncol = 1) +feature_types <- as.integer(c(rep(0, p_X - 2), 1, 1)) +var_weights <- rep(1, p_X) / p_X +outcome_model_type <- 0 + +if (is.null(random_seed)) { + rng_r <- createCppRNG(-1) +} else { + rng_r <- createCppRNG(random_seed) +} + +forest_dataset <- createForestDataset(Xall) +forest_model_config <- createForestModelConfig( + feature_types = feature_types, num_trees = num_trees, + num_features = p_X, num_observations = n, + variable_weights = var_weights, leaf_dimension = 1, + alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, + max_depth = max_depth, leaf_model_type = outcome_model_type, + leaf_model_scale = leaf_prior_scale, + cutpoint_grid_size = cutpoint_grid_size +) +global_model_config <- createGlobalModelConfig(global_error_variance = 1) +forest_model <- createForestModel(forest_dataset, forest_model_config, + global_model_config) +forest_samples <- createForestSamples(num_trees, 1, TRUE, FALSE) +active_forest <- createForest(num_trees, 1, TRUE, FALSE) + +n1 <- sum(y) +zed <- 0.25 * (2 * as.numeric(y) - 1) +outcome <- createOutcome(zed) +active_forest$prepare_for_sampler(forest_dataset, outcome, forest_model, + outcome_model_type, 0.0) +active_forest$adjust_residual(forest_dataset, outcome, forest_model, + FALSE, FALSE) + +gfr_flag <- TRUE +for (i in seq_len(num_samples)) { + if (i > num_warmstart) gfr_flag <- FALSE + forest_model$sample_one_iteration( + forest_dataset, outcome, forest_samples, active_forest, + rng_r, forest_model_config, global_model_config, + keep_forest = TRUE, gfr = 
gfr_flag + ) + eta <- forest_samples$predict_raw_single_forest(forest_dataset, i - 1) + U1 <- runif(n1, pnorm(0, eta[y == 1], 1), 1) + zed[y == 1] <- qnorm(U1, eta[y == 1], 1) + U0 <- runif(n - n1, 0, pnorm(0, eta[y == 0], 1)) + zed[y == 0] <- qnorm(U0, eta[y == 0], 1) + outcome$update_data(zed) + forest_model$propagate_residual_update(outcome) +} +``` + +## Python + +```{python} +#| cache: true +num_warmstart <- 10 +num_mcmc <- 1000 +num_samples = num_warmstart + num_mcmc + +alpha = 0.95; beta = 2; min_samples_leaf = 1; max_depth = 20 +num_trees = 50; cutpoint_grid_size = 100 +tau_init = 0.5 +leaf_prior_scale = np.array([[tau_init]]) +feature_types = np.append(np.repeat(0, p_X - 2), [1, 1]).astype(int) +var_weights = np.repeat(1.0 / p_X, p_X) +outcome_model_type = 0 + +cpp_rng = RNG(random_seed) if random_seed is not None else RNG() + +forest_dataset = Dataset() +forest_dataset.add_covariates(Xall) + +forest_model_config = ForestModelConfig( + feature_types=feature_types, num_trees=num_trees, + num_features=p_X, num_observations=n, + variable_weights=var_weights, leaf_dimension=1, + alpha=alpha, beta=beta, min_samples_leaf=min_samples_leaf, + max_depth=max_depth, leaf_model_type=outcome_model_type, + leaf_model_scale=leaf_prior_scale, + cutpoint_grid_size=cutpoint_grid_size, +) +global_model_config = GlobalModelConfig(global_error_variance=1.0) +forest_sampler = ForestSampler(forest_dataset, global_model_config, + forest_model_config) +forest_samples = ForestContainer(num_trees, 1, True, False) +active_forest = Forest(num_trees, 1, True, False) + +n1 = int(np.sum(y)) +zed = 0.25 * (2.0 * y - 1.0) +outcome = Residual(zed) +forest_sampler.prepare_for_sampler(forest_dataset, outcome, active_forest, + outcome_model_type, np.array([0.0])) + +gfr_flag = True +for i in range(num_samples): + if i >= num_warmstart: + gfr_flag = False + forest_sampler.sample_one_iteration( + forest_samples, active_forest, forest_dataset, outcome, cpp_rng, + global_model_config, 
forest_model_config, + keep_forest=True, gfr=gfr_flag, + ) + eta = np.squeeze(forest_samples.predict_raw_single_forest(forest_dataset, i)) + mu0 = eta[y == 0]; mu1 = eta[y == 1] + u0 = rng.uniform(0, norm.cdf(-mu0), size=n - n1) + u1 = rng.uniform(norm.cdf(-mu1), 1, size=n1) + zed[y == 0] = mu0 + norm.ppf(u0) + zed[y == 1] = mu1 + norm.ppf(u1) + outcome.update_data(np.squeeze(zed) - eta) +``` + +::: + +## Fit the Monotone Probit Model + +The monotonicity constraint +$\Pr(V=1 \mid Z=0, X=x) \leq \Pr(V=1 \mid Z=1, X=x)$ is enforced via the +data augmentation of @papakostas2023forecasts. We parameterize: + +$$ +\Pr(V=1 \mid Z=0, X=x) = \Phi_f(x)\,\Phi_h(x), \qquad +\Pr(V=1 \mid Z=1, X=x) = \Phi_f(x), +$$ + +where $\Phi_\mu(x)$ is the normal CDF with mean $\mu(x)$ and variance 1. + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| cache: true +X_h <- as.matrix(X[z == 0, ]) +n0 <- sum(z == 0); n1 <- sum(z == 1) +num_trees_f <- 50; num_trees_h <- 20 +feature_types_mono <- as.integer(rep(0, 1)) +var_weights_mono <- rep(1, 1) +tau_h <- 1 / num_trees_h +leaf_scale_h <- matrix(tau_h, ncol = 1) +leaf_scale_f <- matrix(1 / num_trees_f, ncol = 1) + +forest_dataset_f <- createForestDataset(X) +forest_dataset_h <- createForestDataset(X_h) + +fmc_f <- createForestModelConfig( + feature_types = feature_types_mono, num_trees = num_trees_f, + num_features = ncol(X), num_observations = nrow(X), + variable_weights = var_weights_mono, leaf_dimension = 1, + alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, + max_depth = max_depth, leaf_model_type = 0, + leaf_model_scale = leaf_scale_f, cutpoint_grid_size = cutpoint_grid_size +) +fmc_h <- createForestModelConfig( + feature_types = feature_types_mono, num_trees = num_trees_h, + num_features = ncol(X_h), num_observations = nrow(X_h), + variable_weights = var_weights_mono, leaf_dimension = 1, + alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, + max_depth = max_depth, leaf_model_type = 0, + 
leaf_model_scale = leaf_scale_h, cutpoint_grid_size = cutpoint_grid_size
+)
+gmc_mono <- createGlobalModelConfig(global_error_variance = 1)
+fm_f <- createForestModel(forest_dataset_f, fmc_f, gmc_mono)
+fm_h <- createForestModel(forest_dataset_h, fmc_h, gmc_mono)
+
+fs_f <- createForestSamples(num_trees_f, 1, TRUE)
+fs_h <- createForestSamples(num_trees_h, 1, TRUE)
+af_f <- createForest(num_trees_f, 1, TRUE)
+af_h <- createForest(num_trees_h, 1, TRUE)
+
+v1 <- v[z == 1]; v0 <- v[z == 0]
+R1 <- rep(NA, n0); R0 <- rep(NA, n0)
+R1[v0 == 1] <- 1; R0[v0 == 1] <- 1
+R1[v0 == 0] <- 0; R0[v0 == 0] <- sample(c(0, 1), sum(v0 == 0), replace = TRUE)
+vaug <- c(v1, R1)
+
+z_f <- (2 * as.numeric(vaug) - 1); z_f <- z_f / sd(z_f)
+z_h <- (2 * as.numeric(R0) - 1); z_h <- z_h / sd(z_h)
+out_f <- createOutcome(z_f); out_h <- createOutcome(z_h)
+af_f$prepare_for_sampler(forest_dataset_f, out_f, fm_f, 0, 0.0)
+af_h$prepare_for_sampler(forest_dataset_h, out_h, fm_h, 0, 0.0)
+af_f$adjust_residual(forest_dataset_f, out_f, fm_f, FALSE, FALSE)
+af_h$adjust_residual(forest_dataset_h, out_h, fm_h, FALSE, FALSE)
+
+gfr_flag <- TRUE
+for (i in seq_len(num_samples)) {
+  if (i > num_warmstart) gfr_flag <- FALSE
+  fm_f$sample_one_iteration(forest_dataset_f, out_f, fs_f, af_f,
+                            rng_r, fmc_f, gmc_mono, keep_forest = TRUE, gfr = gfr_flag)
+  fm_h$sample_one_iteration(forest_dataset_h, out_h, fs_h, af_h,
+                            rng_r, fmc_h, gmc_mono, keep_forest = TRUE, gfr = gfr_flag)
+
+  eta_f <- fs_f$predict_raw_single_forest(forest_dataset_f, i - 1)
+  eta_h <- fs_h$predict_raw_single_forest(forest_dataset_h, i - 1)
+
+  idx0 <- which(v0 == 0)
+  w1 <- (1 - pnorm(eta_h[idx0])) * (1 - pnorm(eta_f[n1 + idx0]))
+  w2 <- (1 - pnorm(eta_h[idx0])) * pnorm(eta_f[n1 + idx0])
+  w3 <- pnorm(eta_h[idx0]) * (1 - pnorm(eta_f[n1 + idx0]))
+  s_w <- w1 + w2 + w3
+  u <- runif(length(idx0))
+  temp <- 1*(u < w1/s_w) + 2*(u > w1/s_w & u < (w1+w2)/s_w) + 3*(u > (w1+w2)/s_w)
+  R1[v0 == 0] <- 1*(temp == 2); R0[v0 == 0] 
<- 1*(temp == 3) + vaug <- c(v1, R1) + + U1 <- runif(sum(R0), pnorm(0, eta_h[R0 == 1], 1), 1) + z_h[R0 == 1] <- qnorm(U1, eta_h[R0 == 1], 1) + U0 <- runif(n0 - sum(R0), 0, pnorm(0, eta_h[R0 == 0], 1)) + z_h[R0 == 0] <- qnorm(U0, eta_h[R0 == 0], 1) + + U1 <- runif(sum(vaug), pnorm(0, eta_f[vaug == 1], 1), 1) + z_f[vaug == 1] <- qnorm(U1, eta_f[vaug == 1], 1) + U0 <- runif(n - sum(vaug), 0, pnorm(0, eta_f[vaug == 0], 1)) + z_f[vaug == 0] <- qnorm(U0, eta_f[vaug == 0], 1) + + out_h$update_data(z_h); fm_h$propagate_residual_update(out_h) + out_f$update_data(z_f); fm_f$propagate_residual_update(out_f) +} +``` + +## Python + +```{python} +#| cache: true +X_h = X[z == 0, :] +n0 = int(np.sum(z == 0)); n1 = int(np.sum(z == 1)) +num_trees_f = 50; num_trees_h = 20 +feature_types_mono = np.repeat(0, p_X - 2).astype(int) +var_weights_mono = np.repeat(1.0 / (p_X - 2.0), p_X - 2) +leaf_scale_f = np.array([[1.0 / num_trees_f]]) +leaf_scale_h = np.array([[1.0 / num_trees_h]]) + +forest_dataset_f = Dataset(); forest_dataset_f.add_covariates(X) +forest_dataset_h = Dataset(); forest_dataset_h.add_covariates(X_h) + +fmc_f = ForestModelConfig( + feature_types=feature_types_mono, num_trees=num_trees_f, + num_features=X.shape[1], num_observations=n, + variable_weights=var_weights_mono, leaf_dimension=1, + alpha=alpha, beta=beta, min_samples_leaf=min_samples_leaf, + max_depth=max_depth, leaf_model_type=0, + leaf_model_scale=leaf_scale_f, cutpoint_grid_size=cutpoint_grid_size, +) +fmc_h = ForestModelConfig( + feature_types=feature_types_mono, num_trees=num_trees_h, + num_features=X_h.shape[1], num_observations=n0, + variable_weights=var_weights_mono, leaf_dimension=1, + alpha=alpha, beta=beta, min_samples_leaf=min_samples_leaf, + max_depth=max_depth, leaf_model_type=0, + leaf_model_scale=leaf_scale_h, cutpoint_grid_size=cutpoint_grid_size, +) +gmc_mono = GlobalModelConfig(global_error_variance=1.0) +fs_f = ForestSampler(forest_dataset_f, gmc_mono, fmc_f) +fs_h = 
ForestSampler(forest_dataset_h, gmc_mono, fmc_h) +forest_samples_f = ForestContainer(num_trees_f, 1, True, False) +forest_samples_h = ForestContainer(num_trees_h, 1, True, False) +af_f = Forest(num_trees_f, 1, True, False) +af_h = Forest(num_trees_h, 1, True, False) + +v1 = v[z == 1]; v0 = v[z == 0] +R1 = np.empty(n0); R0 = np.empty(n0) +R1[v0 == 1] = 1; R0[v0 == 1] = 1 +nv0 = int(np.sum(v0 == 0)) +R1[v0 == 0] = 0; R0[v0 == 0] = rng.choice([0, 1], size=nv0) +vaug = np.append(v1, R1) +z_f = (2.0 * vaug - 1.0); z_f = z_f / np.std(z_f) +z_h = (2.0 * R0 - 1.0); z_h = z_h / np.std(z_h) +out_f = Residual(z_f); out_h = Residual(z_h) +fs_f.prepare_for_sampler(forest_dataset_f, out_f, af_f, 0, np.array([0.0])) +fs_h.prepare_for_sampler(forest_dataset_h, out_h, af_h, 0, np.array([0.0])) + +gfr_flag = True +for i in range(num_samples): + if i >= num_warmstart: + gfr_flag = False + fs_f.sample_one_iteration(forest_samples_f, af_f, forest_dataset_f, out_f, + cpp_rng, gmc_mono, fmc_f, keep_forest=True, gfr=gfr_flag) + fs_h.sample_one_iteration(forest_samples_h, af_h, forest_dataset_h, out_h, + cpp_rng, gmc_mono, fmc_h, keep_forest=True, gfr=gfr_flag) + + eta_f = np.squeeze(forest_samples_f.predict_raw_single_forest(forest_dataset_f, i)) + eta_h = np.squeeze(forest_samples_h.predict_raw_single_forest(forest_dataset_h, i)) + + idx0 = np.where(v0 == 0)[0] + w1 = (1 - norm.cdf(eta_h[idx0])) * (1 - norm.cdf(eta_f[n1 + idx0])) + w2 = (1 - norm.cdf(eta_h[idx0])) * norm.cdf(eta_f[n1 + idx0]) + w3 = norm.cdf(eta_h[idx0]) * (1 - norm.cdf(eta_f[n1 + idx0])) + s_w = w1 + w2 + w3 + u = rng.uniform(size=len(idx0)) + temp = 1*(u < w1/s_w) + 2*((u > w1/s_w) & (u < (w1+w2)/s_w)) + 3*(u > (w1+w2)/s_w) + R1[v0 == 0] = (temp == 2).astype(float) + R0[v0 == 0] = (temp == 3).astype(float) + vaug = np.append(v1, R1) + + mu1 = eta_h[R0 == 1] + z_h[R0 == 1] = mu1 + norm.ppf(rng.uniform(norm.cdf(-mu1), 1, size=int(np.sum(R0)))) + mu0 = eta_h[R0 == 0] + z_h[R0 == 0] = mu0 + norm.ppf(rng.uniform(0, 
norm.cdf(-mu0), size=n0 - int(np.sum(R0)))) + + mu1 = eta_f[vaug == 1] + z_f[vaug == 1] = mu1 + norm.ppf(rng.uniform(norm.cdf(-mu1), 1, size=int(np.sum(vaug)))) + mu0 = eta_f[vaug == 0] + z_f[vaug == 0] = mu0 + norm.ppf(rng.uniform(0, norm.cdf(-mu0), size=n - int(np.sum(vaug)))) + + out_h.update_data(np.squeeze(z_h) - eta_h) + out_f.update_data(np.squeeze(z_f) - eta_f) +``` + +::: + +## Extracting Estimates and Plotting + +We compute the true $ITT_c$ and LATE functions on a prediction grid, then extract +posterior predictions and plot credible bands. + +### Prediction Grid and Truth + +:::{.panel-tabset group="language"} + +## R + +```{r} +ngrid <- 200 +xgrid <- seq(0.1, 2.5, length.out = ngrid) +X_11 <- cbind(xgrid, rep(1, ngrid), rep(1, ngrid)) +X_00 <- cbind(xgrid, rep(0, ngrid), rep(0, ngrid)) +X_01 <- cbind(xgrid, rep(0, ngrid), rep(1, ngrid)) +X_10 <- cbind(xgrid, rep(1, ngrid), rep(0, ngrid)) + +pi_strat <- pi_s(xgrid) +w_a <- pi_strat[, 1]; w_n <- pi_strat[, 2]; w_c <- pi_strat[, 3] + +p11_true <- (w_c/(w_a+w_c))*gamfun(xgrid,1,1,"c") + (w_a/(w_a+w_c))*gamfun(xgrid,1,1,"a") +p00_true <- (w_c/(w_n+w_c))*gamfun(xgrid,0,0,"c") + (w_n/(w_n+w_c))*gamfun(xgrid,0,0,"n") +itt_c_true <- gamfun(xgrid, 1, 1, "c") - gamfun(xgrid, 0, 0, "c") +LATE_true0 <- gamfun(xgrid, 1, 0, "c") - gamfun(xgrid, 0, 0, "c") +LATE_true1 <- gamfun(xgrid, 1, 1, "c") - gamfun(xgrid, 0, 1, "c") +``` + +## Python + +```{python} +ngrid = 200 +xgrid = np.linspace(0.1, 2.5, ngrid) +X_11 = np.column_stack((xgrid, np.ones(ngrid), np.ones(ngrid))) +X_00 = np.column_stack((xgrid, np.zeros(ngrid), np.zeros(ngrid))) +X_01 = np.column_stack((xgrid, np.zeros(ngrid), np.ones(ngrid))) +X_10 = np.column_stack((xgrid, np.ones(ngrid), np.zeros(ngrid))) + +pi_strat = pi_s(xgrid, alpha_a, beta_a, alpha_n, beta_n, alpha_c, beta_c) +w_a = pi_strat[:, 0]; w_n = pi_strat[:, 1]; w_c = pi_strat[:, 2] + +p11_true = (w_c/(w_a+w_c))*gamfun(xgrid,1,1,"c") + (w_a/(w_a+w_c))*gamfun(xgrid,1,1,"a") +p00_true = 
(w_c/(w_n+w_c))*gamfun(xgrid,0,0,"c") + (w_n/(w_n+w_c))*gamfun(xgrid,0,0,"n") +itt_c_true = gamfun(xgrid, 1, 1, "c") - gamfun(xgrid, 0, 0, "c") +LATE_true0 = gamfun(xgrid, 1, 0, "c") - gamfun(xgrid, 0, 0, "c") +LATE_true1 = gamfun(xgrid, 1, 1, "c") - gamfun(xgrid, 0, 1, "c") +``` + +::: + +### Extract Posterior Predictions + +:::{.panel-tabset group="language"} + +## R + +```{r} +fd_grid <- createForestDataset(as.matrix(xgrid)) +fd_11 <- createForestDataset(X_11) +fd_00 <- createForestDataset(X_00) +fd_01 <- createForestDataset(X_01) +fd_10 <- createForestDataset(X_10) + +phat_11 <- pnorm(forest_samples$predict(fd_11)) +phat_00 <- pnorm(forest_samples$predict(fd_00)) +phat_01 <- pnorm(forest_samples$predict(fd_01)) +phat_10 <- pnorm(forest_samples$predict(fd_10)) +phat_ac <- pnorm(fs_f$predict(fd_grid)) +phat_a <- phat_ac * pnorm(fs_h$predict(fd_grid)) +phat_c <- phat_ac - phat_a +phat_n <- 1 - phat_ac +``` + +## Python + +```{python} +def make_dataset(mat): + ds = Dataset() + ds.add_covariates(mat) + return ds + +fd_grid = make_dataset(np.expand_dims(xgrid, 1)) +fd_11 = make_dataset(X_11); fd_00 = make_dataset(X_00) +fd_01 = make_dataset(X_01); fd_10 = make_dataset(X_10) + +phat_11 = norm.cdf(forest_samples.predict(fd_11)) +phat_00 = norm.cdf(forest_samples.predict(fd_00)) +phat_01 = norm.cdf(forest_samples.predict(fd_01)) +phat_10 = norm.cdf(forest_samples.predict(fd_10)) +phat_ac = norm.cdf(forest_samples_f.predict(fd_grid)) +phat_a = phat_ac * norm.cdf(forest_samples_h.predict(fd_grid)) +phat_c = phat_ac - phat_a +phat_n = 1 - phat_ac +``` + +::: + +### Model Fit Diagnostics + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| fig-cap: "Fitted vs. true conditional outcome probabilities." 
+par(mfrow = c(1, 2)) +plot(p11_true, rowMeans(phat_11), pch = 20, cex = 0.5, bty = "n", + xlab = "True p11", ylab = "Fitted p11") +abline(0, 1, col = "red") +plot(p00_true, rowMeans(phat_00), pch = 20, cex = 0.5, bty = "n", + xlab = "True p00", ylab = "Fitted p00") +abline(0, 1, col = "red") +``` + +## Python + +```{python} +#| fig-cap: "Fitted vs. true conditional outcome probabilities." +fig, (ax1, ax2) = plt.subplots(1, 2) +ax1.scatter(p11_true, np.mean(phat_11, axis=1), color="black", s=5) +ax1.axline((0, 0), slope=1, color="red", linestyle=(0, (3, 3))) +ax2.scatter(p00_true, np.mean(phat_00, axis=1), color="black", s=5) +ax2.axline((0, 0), slope=1, color="red", linestyle=(0, (3, 3))) +plt.show() +``` + +::: + +### Construct and Plot the $ITT_c$ + +We center the posterior on the identified interval at the value implied by a valid +exclusion restriction, then construct credible bands for the $ITT_c$ and compare +to the LATE. + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| cache: true +#| fig-cap: "Posterior credible bands for the ITT_c (gold/brown) and LATE (gray/blue) compared to the true ITT_c (solid black), LATE_z0 (dotted), and LATE_z1 (dashed)." 
+ss <- 6 +itt_c <- late <- matrix(NA, ngrid, ncol(phat_c)) + +for (j in seq_len(ncol(phat_c))) { + gamest11 <- ((phat_a[,j]+phat_c[,j])/phat_c[,j])*phat_11[,j] - + phat_10[,j]*phat_a[,j]/phat_c[,j] + lower11 <- pmax(0, ((phat_a[,j]+phat_c[,j])/phat_c[,j])*phat_11[,j] - + phat_a[,j]/phat_c[,j]) + upper11 <- pmin(1, ((phat_a[,j]+phat_c[,j])/phat_c[,j])*phat_11[,j]) + m11 <- (gamest11 - lower11)/(upper11 - lower11) + a1 <- ss*m11; b1 <- ss*(1 - m11) + a1[m11 < 0] <- 1; b1[m11 < 0] <- 5 + a1[m11 > 1] <- 5; b1[m11 > 1] <- 1 + + gamest00 <- ((phat_n[,j]+phat_c[,j])/phat_c[,j])*phat_00[,j] - + phat_01[,j]*phat_n[,j]/phat_c[,j] + lower00 <- pmax(0, ((phat_n[,j]+phat_c[,j])/phat_c[,j])*phat_00[,j] - + phat_n[,j]/phat_c[,j]) + upper00 <- pmin(1, ((phat_n[,j]+phat_c[,j])/phat_c[,j])*phat_00[,j]) + m00 <- (gamest00 - lower00)/(upper00 - lower00) + a0 <- ss*m00; b0 <- ss*(1 - m00) + a0[m00 < 0] <- 1; b0[m00 < 0] <- 5 + a0[m00 > 1] <- 5; b0[m00 > 1] <- 1 + + itt_c[,j] <- lower11 + (upper11-lower11)*rbeta(ngrid, a1, b1) - + (lower00 + (upper00-lower00)*rbeta(ngrid, a0, b0)) + late[,j] <- gamest11 - gamest00 +} + +upperq <- apply(itt_c, 1, quantile, 0.975) +lowerq <- apply(itt_c, 1, quantile, 0.025) +upperq_er <- apply(late, 1, quantile, 0.975, na.rm = TRUE) +lowerq_er <- apply(late, 1, quantile, 0.025, na.rm = TRUE) + +plot(xgrid, itt_c_true, type = "n", ylim = c(-0.75, 0.05), bty = "n", + xlab = "x", ylab = "Treatment effect") +polygon(c(xgrid, rev(xgrid)), c(lowerq, rev(upperq)), + col = rgb(0.5, 0.25, 0, 0.25), border = FALSE) +polygon(c(xgrid, rev(xgrid)), c(lowerq_er, rev(upperq_er)), + col = rgb(0, 0, 0.5, 0.25), border = FALSE) +lines(xgrid, rowMeans(late), col = "slategray", lwd = 3) +lines(xgrid, rowMeans(itt_c), col = "goldenrod1", lwd = 1) +lines(xgrid, LATE_true0, col = "black", lwd = 2, lty = 3) +lines(xgrid, LATE_true1, col = "black", lwd = 2, lty = 2) +lines(xgrid, itt_c_true, col = "black", lwd = 1) +``` + +## Python + +```{python} +#| cache: true +#| fig-cap: 
"Posterior credible bands for ITT_c and LATE compared to the true functions." +ss = 6 +itt_c = np.empty((ngrid, phat_c.shape[1])) +late = np.empty((ngrid, phat_c.shape[1])) + +for j in range(phat_c.shape[1]): + gamest11 = ((phat_a[:,j]+phat_c[:,j])/phat_c[:,j])*phat_11[:,j] - \ + phat_10[:,j]*phat_a[:,j]/phat_c[:,j] + lower11 = np.maximum(0., ((phat_a[:,j]+phat_c[:,j])/phat_c[:,j])*phat_11[:,j] - + phat_a[:,j]/phat_c[:,j]) + upper11 = np.minimum(1., ((phat_a[:,j]+phat_c[:,j])/phat_c[:,j])*phat_11[:,j]) + m11 = (gamest11 - lower11) / (upper11 - lower11) + a1 = ss * m11; b1 = ss * (1 - m11) + a1[m11 < 0] = 1; b1[m11 < 0] = 5 + a1[m11 > 1] = 5; b1[m11 > 1] = 1 + + gamest00 = ((phat_n[:,j]+phat_c[:,j])/phat_c[:,j])*phat_00[:,j] - \ + phat_01[:,j]*phat_n[:,j]/phat_c[:,j] + lower00 = np.maximum(0., ((phat_n[:,j]+phat_c[:,j])/phat_c[:,j])*phat_00[:,j] - + phat_n[:,j]/phat_c[:,j]) + upper00 = np.minimum(1., ((phat_n[:,j]+phat_c[:,j])/phat_c[:,j])*phat_00[:,j]) + m00 = (gamest00 - lower00) / (upper00 - lower00) + a0 = ss * m00; b0 = ss * (1 - m00) + a0[m00 < 0] = 1; b0[m00 < 0] = 5 + a0[m00 > 1] = 5; b0[m00 > 1] = 1 + + itt_c[:, j] = lower11 + (upper11-lower11)*rng.beta(a1, b1, ngrid) - \ + (lower00 + (upper00-lower00)*rng.beta(a0, b0, ngrid)) + late[:, j] = gamest11 - gamest00 + +upperq = np.quantile(itt_c, 0.975, axis=1) +lowerq = np.quantile(itt_c, 0.025, axis=1) +upperq_er = np.quantile(late, 0.975, axis=1) +lowerq_er = np.quantile(late, 0.025, axis=1) + +plt.plot(xgrid, itt_c_true, color="black") +plt.ylim(-0.75, 0.05) +plt.fill(np.append(xgrid, xgrid[::-1]), np.append(lowerq, upperq[::-1]), + color=(0.5, 0.5, 0, 0.25)) +plt.fill(np.append(xgrid, xgrid[::-1]), np.append(lowerq_er, upperq_er[::-1]), + color=(0, 0, 0.5, 0.25)) +plt.plot(xgrid, np.mean(late, axis=1), color="darkgrey") +plt.plot(xgrid, np.mean(itt_c, axis=1), color="gold") +plt.plot(xgrid, LATE_true0, color="black", linestyle=(0, (2, 2))) +plt.plot(xgrid, LATE_true1, color="black", linestyle=(0, (4, 4))) 
+plt.show() +``` + +::: + +With a valid exclusion restriction the three black curves would all be identical. +Without it, the direct effect of $Z$ on $Y$ causes them to diverge. Specifically, the +$ITT_c$ (gold) compares getting the vaccine *and* the reminder to not getting either — +when both reduce risk, we see a larger overall reduction. The two LATE effects compare +the isolated impact of the vaccine among those who did and did not receive the reminder, +respectively. + +## References diff --git a/vignettes/multi-chain.qmd b/vignettes/multi-chain.qmd new file mode 100644 index 000000000..fd42d45b3 --- /dev/null +++ b/vignettes/multi-chain.qmd @@ -0,0 +1,125 @@ +--- +title: "Multi-Chain Inference" +--- + +Running multiple independent MCMC chains with `stochtree` and combining +their results for improved mixing diagnostics and more robust posterior summaries. + +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/multi_chain.ipynb` +- **R**: pkgdown article `MultiChain` +::: + +## Background + +Running a single MCMC chain can be fragile. Multiple independent chains allow: + +- **Convergence diagnostics**: $\hat{R}$ (potential scale reduction factor) detects + non-convergence across chains. +- **Parallelism**: Chains can run concurrently on multiple cores. +- **Ensemble pooling**: Posterior samples from all chains can be pooled for final inference. 
+ +## Setup + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +library(stochtree) +library(doParallel) +``` + +## Python + +```{python} +#| eval: false +import stochtree +import numpy as np +from multiprocessing import Pool +``` + +:::: + +## Data Simulation + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Simulate data +``` + +## Python + +```{python} +#| eval: false +# Simulate data +``` + +:::: + +## Running Multiple Chains + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Run chains in parallel with doParallel +``` + +## Python + +```{python} +#| eval: false +# Run chains sequentially or with multiprocessing +``` + +:::: + +## Convergence Diagnostics + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Compute R-hat, trace plots +``` + +## Python + +```{python} +#| eval: false +# Compute R-hat, trace plots +``` + +:::: + +## Combining Chain Results + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Pool posterior samples across chains +``` + +## Python + +```{python} +#| eval: false +# Pool posterior samples across chains +``` + +:::: diff --git a/vignettes/multivariate-bcf.qmd b/vignettes/multivariate-bcf.qmd new file mode 100644 index 000000000..51cb9df8a --- /dev/null +++ b/vignettes/multivariate-bcf.qmd @@ -0,0 +1,53 @@ +--- +title: "Multivariate Treatment BCF" +--- + +BCF extended to vector-valued (multivariate) treatments, estimating heterogeneous +effects for multiple treatment arms simultaneously. + +::: {.callout-note} +This vignette is under construction. 
Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/multivariate_treatment_causal_inference.ipynb` +::: + +## Background + +When treatments are multivariate — such as continuous dose vectors or multiple +binary arms — the standard BCF model extends to: + +$$ +Y_i = \mu(X_i) + \tau(X_i)^\top Z_i + \epsilon_i +$$ + +where $Z_i \in \mathbb{R}^p$ and $\tau(X_i) \in \mathbb{R}^p$ is a vector of +covariate-varying treatment effects. + +## Setup + +```{python} +#| eval: false +import stochtree +import numpy as np +``` + +## Data Simulation + +```{python} +#| eval: false +# Simulate multivariate treatment data +``` + +## Model Fitting + +```{python} +#| eval: false +# Fit multivariate BCF +``` + +## Posterior Summaries + +```{python} +#| eval: false +# Per-treatment CATE posteriors +``` diff --git a/vignettes/ordinal-outcome.qmd b/vignettes/ordinal-outcome.qmd new file mode 100644 index 000000000..1a95b8438 --- /dev/null +++ b/vignettes/ordinal-outcome.qmd @@ -0,0 +1,53 @@ +--- +title: "Ordinal Outcome Modeling" +--- + +BART for ordinal (ordered categorical) responses, using a latent variable +formulation with learned cutpoints. + +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/ordinal_outcome.ipynb` +::: + +## Background + +For an ordinal response $Y_i \in \{1, 2, \ldots, K\}$, the model introduces a +latent continuous variable $Z_i$ and cutpoints $\gamma_1 < \gamma_2 < \cdots < \gamma_{K-1}$: + +$$ +Y_i = k \iff \gamma_{k-1} < Z_i \leq \gamma_k +$$ + +where $Z_i = \mu(X_i) + \epsilon_i$ with $\epsilon_i \sim \mathcal{N}(0,1)$ and +$\mu(\cdot)$ is a BART ensemble. 
+ +## Setup + +```{python} +#| eval: false +import stochtree +import numpy as np +``` + +## Data Simulation + +```{python} +#| eval: false +# Simulate ordinal outcome data +``` + +## Model Fitting + +```{python} +#| eval: false +# Fit ordinal BART +``` + +## Posterior Summaries + +```{python} +#| eval: false +# Category probabilities, cumulative probabilities +``` diff --git a/vignettes/prior-calibration.qmd b/vignettes/prior-calibration.qmd new file mode 100644 index 000000000..91695c369 --- /dev/null +++ b/vignettes/prior-calibration.qmd @@ -0,0 +1,62 @@ +--- +title: "Prior Calibration" +--- + +Choosing and calibrating the BART tree priors — including the branching process +prior on tree structure and the leaf parameter prior — to match prior beliefs +about the scale of the outcome. + +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **R**: pkgdown article `PriorCalibration` +::: + +## Background + +BART uses two types of priors: + +1. **Tree structure prior**: Controls tree depth via $P(\text{split at depth } d) = \alpha (1 + d)^{-\beta}$. + Defaults: $\alpha = 0.95$, $\beta = 2$. + +2. **Leaf prior**: $\mu_{ij} \sim \mathcal{N}(0, \sigma_\mu^2 / m)$ where $m$ is the number + of trees. The scale $\sigma_\mu$ is typically calibrated so that $m \cdot \sigma_\mu$ + spans the observed outcome range. + +3. **Error variance prior**: $\sigma^2 \sim \text{Inv-}\chi^2(\nu, \lambda)$ with $\nu$ and + $\lambda$ calibrated from a preliminary linear model fit. 
+ +## Setup + +```{r} +#| eval: false +library(stochtree) +``` + +## Default Priors + +```{r} +#| eval: false +# Examine default prior settings +``` + +## Calibrating the Leaf Scale + +```{r} +#| eval: false +# Automatic calibration of sigma_mu based on outcome range +``` + +## Calibrating the Error Variance Prior + +```{r} +#| eval: false +# Calibrate nu and lambda from a linear model +``` + +## Sensitivity Analysis + +```{r} +#| eval: false +# Compare posteriors under different prior choices +``` diff --git a/vignettes/rdd.qmd b/vignettes/rdd.qmd new file mode 100644 index 000000000..6df9bec85 --- /dev/null +++ b/vignettes/rdd.qmd @@ -0,0 +1,472 @@ +--- +title: "Regression Discontinuity Design (RDD) with stochtree" +author: + - name: Rafael Alcantara + affiliation: University of Texas at Austin + - name: P. Richard Hahn + affiliation: Arizona State University + - name: Drew Herren + affiliation: University of Texas at Austin +date: today +bibliography: R/RDD/rdd.bib +--- + +::: {.hidden} +$$ +\newcommand{\ind}{\perp \!\!\! \perp} +\newcommand{\B}{\mathcal{B}} +\newcommand{\res}{\mathbf{r}} +\newcommand{\m}{\mathbf{m}} +\newcommand{\x}{\mathbf{x}} +\newcommand{\N}{\mathrm{N}} +\newcommand{\w}{\mathrm{w}} +\newcommand{\iidsim}{\stackrel{\mathrm{iid}}{\sim}} +\newcommand{\V}{\mathbb{V}} +\newcommand{\F}{\mathbf{F}} +\newcommand{\Y}{\mathbf{Y}} +$$ +::: + +```{r} +#| include: false +reticulate::use_python( + Sys.getenv("RETICULATE_PYTHON", unset = Sys.which("python3")), + required = TRUE +) +``` + +## Introduction + +We study conditional average treatment effect (CATE) estimation for regression +discontinuity designs (RDD), in which treatment assignment is based on whether a +particular covariate — referred to as the running variable — lies above or below a +known value, referred to as the cutoff value. 
Because treatment is deterministically +assigned as a known function of the running variable, RDDs are trivially deconfounded: +treatment assignment is independent of the outcome variable, given the running variable. +However, estimation of treatment effects in RDDs is more complicated than simply +controlling for the running variable, because doing so introduces a complete lack of +overlap. Nonetheless, the CATE _at the cutoff_, $X=c$, may still be identified +provided the conditional expectation $E[Y \mid X,W]$ is continuous at that point for +_all_ $W=w$. We exploit this assumption with the leaf regression BART model implemented +in stochtree, which allows us to define an explicit prior on the CATE. + +## Regression Discontinuity Design + +We conceptualize the treatment effect estimation problem via a quartet of random +variables $(Y, X, Z, U)$. The variable $Y$ is the outcome variable; $X$ is the running +variable; $Z$ is the treatment assignment indicator; and $U$ represents additional, +possibly unobserved, causal factors. What makes this an RDD is the stipulation that +$Z = I(X > c)$ for cutoff $c$. We assume $c = 0$ without loss of generality. + +The following figure depicts a causal diagram representing the assumed causal +relationships between these variables. Two key features are: (1) $X$ blocks the +impact of $U$ on $Z$, satisfying the back-door criterion; and (2) $X$ and $U$ are +not descendants of $Z$. + +![A causal directed acyclic graph representing the general structure of a regression discontinuity design problem.](R/RDD/RDD_DAG.png){width=40% fig-align="center"} + +Using this causal diagram, we may express $Y$ as some function of its graph parents +$(X,Z,U)$: $Y = \F(X,Z,U)$. We relate this to the potential outcomes framework via + +$$ +Y^1 = \F(X,1,U), \qquad Y^0 = \F(X,0,U). 
+$$ + +Defining conditional expectations +$$ +\mu_1(x) = E[Y \mid X=x, Z=1], \qquad \mu_0(x) = E[Y \mid X=x, Z=0], +$$ +the treatment effect function is $\tau(x) = \mu_1(x) - \mu_0(x)$. Because $Z = I(X > 0)$, +we can only learn $\mu_1(x)$ for $X > 0$ and $\mu_0(x)$ for $X < 0$. Overlap is +violated, so the overall ATE $\bar{\tau} = E(\tau(X))$ is unidentified. We instead +estimate $\tau(0) = \mu_1(0) - \mu_0(0)$, which is identified for continuous $X$ +under the assumption that $\mu_1$ and $\mu_0$ are suitably smooth at $x = 0$. + +### Conditional Average Treatment Effects in RDD + +We are concerned with learning not only $\tau(0)$ but also RDD CATEs, +$\tau(0, \w)$ for covariate vector $\w$. Defining potential outcome means + +$$ +\mu_z(x,\w) = E[Y \mid X=x, W=\w, Z=z], +$$ + +our treatment effect function is $\tau(x,\w) = \mu_1(x,\w) - \mu_0(x,\w)$. We +must assume $\mu_1(x,\w)$ and $\mu_0(x,\w)$ are suitably smooth in $x$ for every $\w$. +CATE estimation in RDDs then reduces to estimating $E[Y \mid X=x, W=\w, Z=z]$, for +which we turn to BART. + +## The BARDDT Model + +We propose a BART model where the trees split on $(x,\w)$ but each leaf node parameter +is a vector of regression coefficients tailored to the RDD context. Let $\psi$ denote +the following basis vector: +$$ +\psi(x,z) = \begin{bmatrix} 1 & zx & (1-z)x & z \end{bmatrix}. +$$ + +The prediction function for tree $j$ is defined as $g_j(x, \w, z) = \psi(x, z) \Gamma_{b_j(x, \w)}$ +for leaf-specific regression vector $\Gamma_{b_j} = (\eta_{b_j}, \lambda_{b_j}, \theta_{b_j}, \Delta_{b_j})^t$. +The model for observations in leaf $b_j$ is + +$$ +\Y_{b_j} \mid \Gamma_{b_j}, \sigma^2 \sim \N(\Psi_{b_j} \Gamma_{b_j}, \sigma^2), \qquad +\Gamma_{b_j} \sim \N(0, \Sigma_0), +$$ + +where we set $\Sigma_0 = \frac{0.033}{J}\mathrm{I}$ as a default (for $x$ standardized +to unit variance in-sample). 
+ +This choice of basis entails that the RDD CATE at $\w$, $\tau(0, \w)$, is the sum of +the $\Delta_{b_j(0, \w)}$ elements across all trees: + +$$ +\tau(0, \w) = \sum_{j=1}^J \Delta_{b_j(0, \w)}. +$$ + +The priors on the $\Delta$ coefficients directly regularize the treatment effect. + +The following figures illustrate how BARDDT fits a response surface and estimates CATEs. + +![Two regression trees with splits in $x$ and a single scalar $w$. Node images depict the $g(x,w,z)$ function defined by that node's coefficients. The vertical gap between line segments at $x=0$ is that node's contribution to the CATE.](R/RDD/trees1.png){width=70% fig-align="center"} + +![The same two trees represented as a partition of the $x$-$w$ plane. The bottom figure shows the combined partition; the red dashed line marks $W=w^*$.](R/RDD/trees2.png){width=70% fig-align="center"} + +![Left: the function fit at $W = w^*$ for the two trees, superimposed. Right: the aggregated fit. The magnitude of the discontinuity at $x = 0$ is the treatment effect.](R/RDD/trees3.png){width=70% fig-align="center"} + +An interesting property of BARDDT: by letting the regression trees split on the running +variable, there is no need to separately define a bandwidth as in polynomial RDD. The +regression trees automatically determine (in the course of posterior sampling) when to +prune away regions far from the cutoff. + +## Demo + +In this section, we provide code for implementing BARDDT in `stochtree` on a +popular RDD dataset. 
+ +### Load Libraries + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| message: false +library(stochtree) +library(rpart) +library(rpart.plot) +library(foreach) +library(doParallel) +``` + +## Python + +```{python} +import matplotlib.pyplot as plt +import seaborn as sns +import numpy as np +import pandas as pd +from sklearn.tree import DecisionTreeRegressor, plot_tree +from stochtree import BARTModel +``` + +::: + +### Dataset + +The data comes from @lindo2010ability, who analyze data on college students at a large +Canadian university to evaluate an academic probation policy. Students whose GPA falls +below a threshold are placed on academic probation. The running variable $X$ is the +negative distance between a student's previous-term GPA and the probation threshold, so +students on probation ($Z = 1$) have positive scores and the cutoff is 0. The outcome +$Y$ is the student's GPA at the end of the current term. Potential moderators $W$ are: +gender (`male`), age at university entry (`age_at_entry`), a dummy for being born in +North America (`bpl_north_america`), credits taken in the first year +(`totcredits_year1`), campus indicators (`loc_campus` 1–3), and high school GPA +quantile (`hsgrade_pct`). 
+ +:::{.panel-tabset group="language"} + +## R + +```{r} +data <- read.csv("https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv") +y <- data$nextGPA +x <- data$X +x <- x / sd(x) +w <- data[, 4:11] +w$totcredits_year1 <- factor(w$totcredits_year1, ordered = TRUE) +w$male <- factor(w$male, ordered = FALSE) +w$bpl_north_america <- factor(w$bpl_north_america, ordered = FALSE) +w$loc_campus1 <- factor(w$loc_campus1, ordered = FALSE) +w$loc_campus2 <- factor(w$loc_campus2, ordered = FALSE) +w$loc_campus3 <- factor(w$loc_campus3, ordered = FALSE) +c <- 0 +n <- nrow(data) +z <- as.numeric(x > c) +h <- 0.1 +test <- -h < x & x < h +ntest <- sum(test) +``` + +## Python + +```{python} +data = pd.read_csv("https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv") +y = data.loc[:, "nextGPA"].to_numpy() +x = data.loc[:, "X"].to_numpy() +x = x / np.std(x) +w = data.iloc[:, 3:11] + +ordered_cat = pd.api.types.CategoricalDtype(ordered=True) +unordered_cat = pd.api.types.CategoricalDtype(ordered=False) +w.loc[:, "totcredits_year1"] = w.loc[:, "totcredits_year1"].astype(ordered_cat) +w.loc[:, "male"] = w.loc[:, "male"].astype(unordered_cat) +w.loc[:, "bpl_north_america"] = w.loc[:, "bpl_north_america"].astype(unordered_cat) +w.loc[:, "loc_campus1"] = w.loc[:, "loc_campus1"].astype(unordered_cat) +w.loc[:, "loc_campus2"] = w.loc[:, "loc_campus2"].astype(unordered_cat) +w.loc[:, "loc_campus3"] = w.loc[:, "loc_campus3"].astype(unordered_cat) +c = 0 +n = data.shape[0] +z = np.where(x > c, 1.0, 0.0) +h = 0.1 +test = (x > -h) & (x < h) +ntest = int(np.sum(test)) +``` + +::: + +### Target Estimand + +Our estimand is the CATE function at $x = 0$, i.e. $\tau(0, \w)$. To focus on +feasible estimation points, we restrict to observed $\w_i$ such that $|x_i| \leq \delta$ +(here $\delta = 0.1$ after standardizing $X$). 
Our estimand is therefore
+
+$$
+\tau(0, \w_i) \quad \forall i \text{ such that } |x_i| \leq \delta.
+$$
+
+### Implementing BARDDT
+
+The $\psi$ basis vector for the leaf regression is
+$\psi = [1,\, zx,\, (1-z)x,\, z]$, and the training covariate matrix is
+$[x,\, W]$. The prediction basis at the cutoff for $Z=1$ and $Z=0$ is
+
+$$
+\psi_1 = [1, 0, 0, 1], \qquad \psi_0 = [1, 0, 0, 0].
+$$
+
+:::{.panel-tabset group="language"}
+
+## R
+
+```{r}
+fit_barddt <- function(y, x, w, z, test, c,
+                       num_gfr = 2, num_mcmc = 500) {
+  n <- length(y)
+  barddt_global <- list(standardize = TRUE,
+                        sample_sigma_global = TRUE,
+                        sigma2_global_init = 0.1)
+  barddt_mean <- list(num_trees = 50, min_samples_leaf = 20,
+                      alpha = 0.95, beta = 2, max_depth = 20,
+                      sample_sigma2_leaf = FALSE,
+                      sigma2_leaf_init = diag(rep(0.1 / 50, 4)))
+  B <- cbind(rep(1, n), z * x, (1 - z) * x, z)
+  B1 <- cbind(rep(1, n), rep(c, n), rep(0, n), rep(1, n))[test, ]
+  B0 <- cbind(rep(1, n), rep(0, n), rep(c, n), rep(0, n))[test, ]
+  Xmat <- as.matrix(cbind(rep(0, n), w))[test, ]
+  fit <- stochtree::bart(
+    X_train = as.matrix(cbind(x, w)), y_train = y,
+    leaf_basis_train = B,
+    mean_forest_params = barddt_mean,
+    general_params = barddt_global,
+    num_gfr = num_gfr, num_mcmc = num_mcmc
+  )
+  pred1 <- predict(fit, Xmat, B1)$y_hat
+  pred0 <- predict(fit, Xmat, B0)$y_hat
+  pred1 - pred0
+}
+```
+
+## Python
+
+```{python}
+def estimate_barddt(y, x, w, z, test, c,
+                    num_gfr=2, num_mcmc=100, seed=None):
+    n = y.shape[0]
+    global_params = {"standardize": True,
+                     "sample_sigma_global": True,
+                     "sigma2_global_init": 0.1}
+    if seed is not None:
+        global_params["random_seed"] = seed
+    mean_params = {"num_trees": 50, "min_samples_leaf": 20,
+                   "alpha": 0.95, "beta": 2, "max_depth": 20,
+                   "sample_sigma2_leaf": False,
+                   "sigma2_leaf_init": np.diag(np.repeat(0.1 / 50, 4))}
+    Psi = np.column_stack([np.ones(n), z * x, (1 - z) * x, z])
+    Psi1 = np.column_stack([np.ones(n), np.repeat(c, n),
+                            np.zeros(n), np.ones(n)])[test, 
:]
+    Psi0 = np.column_stack([np.ones(n), np.zeros(n),
+                            np.repeat(c, n), np.zeros(n)])[test, :]
+    Xmat = np.column_stack([np.zeros(n), w])[test, :]
+    model = BARTModel()
+    model.sample(X_train=np.column_stack([x, w]), y_train=y,
+                 leaf_basis_train=Psi, num_gfr=num_gfr, num_mcmc=num_mcmc,
+                 general_params=global_params, mean_forest_params=mean_params)
+    return model.predict(Xmat, Psi1) - model.predict(Xmat, Psi0)
+```
+
+:::
+
+### Fitting the Model
+
+We run multiple chains and combine their posterior draws.
+
+:::{.panel-tabset group="language"}
+
+## R
+
+```{r}
+#| cache: true
+num_chains <- 20
+num_gfr <- 2
+num_mcmc <- 500
+
+ncores <- min(5, parallel::detectCores() - 1)
+cl <- makeCluster(ncores)
+registerDoParallel(cl)
+
+chain_outputs <- foreach(i = seq_len(num_chains)) %dopar% {
+  fit_barddt(y, x, w, z, test, c,
+             num_gfr = num_gfr, num_mcmc = num_mcmc)
+}
+stopCluster(cl)
+
+pred <- do.call("cbind", chain_outputs)
+```
+
+## Python
+
+```{python}
+#| cache: true
+num_chains = 4
+num_mcmc = 100
+cate_result = np.empty((ntest, num_chains * num_mcmc))
+for i in range(num_chains):
+    draws = estimate_barddt(y, x, w, z, test, c,
+                            num_gfr=2, num_mcmc=num_mcmc, seed=i)
+    cate_result[:, (i * num_mcmc):((i + 1) * num_mcmc)] = draws
+```
+
+:::
+
+### Analyzing CATE Heterogeneity
+
+To summarize the CATE posterior we fit a regression tree to the posterior mean
+point estimates $\bar{\tau}_i = \frac{1}{M} \sum_{h=1}^M \tau^{(h)}(0, \w_i)$,
+using $W$ as predictors. We restrict to observations with $|x_i| \leq \delta$.
+
+:::{.panel-tabset group="language"}
+
+## R
+
+```{r}
+#| fig-cap: "Regression tree fit to posterior point estimates of individual treatment effects. Top number in each box is the average subgroup treatment effect; lower number is the share of the sample." 
+cate <- rpart(y ~ ., data.frame(y = rowMeans(pred), w[test, ]), + control = rpart.control(cp = 0.015)) + +plot_cart <- function(rp) { + fr <- rp$frame + left <- which.min(fr$yval) + right <- which.max(fr$yval) + cols <- rep("lightblue3", nrow(fr)) + cols[fr$yval == fr$yval[left]] <- "tomato3" + cols[fr$yval == fr$yval[right]] <- "gold2" + cols +} + +rpart.plot(cate, main = "", box.col = plot_cart(cate)) +``` + +## Python + +```{python} +#| fig-cap: "Decision tree fit to posterior mean CATEs, used as an effect moderation summary." +y_surrogate = np.mean(cate_result, axis=1) +X_surrogate = w.iloc[test, :] +cate_tree = DecisionTreeRegressor(min_impurity_decrease=0.0001) +cate_tree.fit(X=X_surrogate, y=y_surrogate) +plot_tree(cate_tree, impurity=False, filled=True, + feature_names=w.columns, proportion=False, + label="root", node_ids=True) +plt.show() +``` + +::: + +The resulting tree indicates that course load (`totcredits_year1`) in the academic term +leading to probation is a strong moderator of the treatment effect. The tree also flags +campus, age at entry, and gender as secondary moderators — all prima facie plausible. + +### Comparing Subgroup Posteriors + +The effect moderation tree is a posterior summary tool; it does not alter the +posterior itself. We can compare any two subgroups by averaging their individual +posterior draws. Consider the two groups at opposite ends of the effect range: + +- **Group A**: male student, entered college older than 19, attempted > 4.8 credits in + the first year (leftmost leaf, red) +- **Group B**: any gender, entered college younger than 19, attempted 4.3–4.8 credits + in the first year (rightmost leaf, gold) + +Subgroup posteriors are + +$$ +\bar{\tau}_A^{(h)} = \frac{1}{n_A} \sum_{i \in A} \tau^{(h)}(0, \w_i), +$$ + +where $h$ indexes a posterior draw and $n_A$ is the group size. + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| fig-cap: "Joint kernel density estimate of the CATE posteriors for Groups A and B. 
Nearly all contour lines lie above the 45° line, indicating that Group B has persistently higher treatment effects."
+cate_kde <- function(rp, pred) {
+  left <- rp$where == which.min(rp$frame$yval)
+  right <- rp$where == which.max(rp$frame$yval)
+  cate_a <- colMeans(pred[left, , drop = FALSE])
+  cate_b <- colMeans(pred[right, , drop = FALSE])
+  MASS::kde2d(cate_a, cate_b, n = 200)
+}
+contour(cate_kde(cate, pred), bty = "n",
+        xlab = "Group A", ylab = "Group B")
+abline(a = 0, b = 1)
+```
+
+## Python
+
+```{python}
+#| fig-cap: "Joint KDE of Group A and Group B CATE posteriors. Contours above the diagonal indicate Group B has persistently higher treatment effects."
+predicted_nodes = cate_tree.apply(X=X_surrogate)
+posterior_group_a = np.mean(cate_result[predicted_nodes == 2, :], axis=0)
+posterior_group_b = np.mean(cate_result[predicted_nodes == 6, :], axis=0)
+posterior_df = pd.DataFrame({"group_a": posterior_group_a,
+                             "group_b": posterior_group_b})
+sns.kdeplot(data=posterior_df, x="group_a", y="group_b")
+plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3)))
+plt.show()
+```
+
+:::
+
+The contour lines are nearly all above the 45° line, indicating that the posterior
+probability mass lies in the region where Group B has a larger treatment effect than
+Group A, even after accounting for estimation uncertainty.
+
+As always, CATEs that vary with observable factors do not necessarily represent a
+_causal_ moderating relationship; uncovering these patterns is crucial for suggesting
+causal mechanisms to investigate in future studies.
+
+## References
diff --git a/vignettes/serialization.qmd b/vignettes/serialization.qmd
new file mode 100644
index 000000000..6226390f0
--- /dev/null
+++ b/vignettes/serialization.qmd
@@ -0,0 +1,114 @@
+---
+title: "Model Serialization"
+---
+
+Saving fitted `stochtree` models to disk and reloading them for prediction
+or further analysis — without re-running the sampler. 
+ +::: {.callout-note} +This vignette is under construction. Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/serialization.ipynb` +- **R**: pkgdown article `ModelSerialization` +::: + +## Setup + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +library(stochtree) +``` + +## Python + +```{python} +#| eval: false +import stochtree +import json +``` + +:::: + +## Fitting a Model + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Fit a BART model to serialize +``` + +## Python + +```{python} +#| eval: false +# Fit a BART model to serialize +``` + +:::: + +## Saving to Disk + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Serialize model to JSON / binary +``` + +## Python + +```{python} +#| eval: false +# Serialize model to JSON / binary +``` + +:::: + +## Loading from Disk + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Load serialized model +``` + +## Python + +```{python} +#| eval: false +# Load serialized model +``` + +:::: + +## Prediction After Loading + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Predict from reloaded model +``` + +## Python + +```{python} +#| eval: false +# Predict from reloaded model +``` + +:::: diff --git a/vignettes/sklearn.qmd b/vignettes/sklearn.qmd new file mode 100644 index 000000000..106b500da --- /dev/null +++ b/vignettes/sklearn.qmd @@ -0,0 +1,64 @@ +--- +title: "Scikit-Learn Interface" +--- + +Using `stochtree` via scikit-learn compatible estimators — +`StochTreeBARTRegressor`, `StochTreeBARTBinaryClassifier`, and +`StochTreeBCFRegressor` — for seamless integration with the sklearn ecosystem +(pipelines, cross-validation, grid search). + +::: {.callout-note} +This vignette is under construction. 
Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/sklearn_wrappers.ipynb` +::: + +## Setup + +```{python} +#| eval: false +from stochtree.sklearn import ( + StochTreeBARTRegressor, + StochTreeBARTBinaryClassifier, + StochTreeBCFRegressor, +) +import numpy as np +from sklearn.model_selection import cross_val_score +from sklearn.pipeline import Pipeline +from sklearn.preprocessing import StandardScaler +``` + +## Regression with `StochTreeBARTRegressor` + +```{python} +#| eval: false +# Fit and predict +``` + +## Classification with `StochTreeBARTBinaryClassifier` + +```{python} +#| eval: false +# Fit and predict_proba +``` + +## Causal Inference with `StochTreeBCFRegressor` + +```{python} +#| eval: false +# BCF via sklearn interface +``` + +## Using sklearn Pipelines + +```{python} +#| eval: false +# Example pipeline with preprocessing +``` + +## Cross-Validation + +```{python} +#| eval: false +# cross_val_score with a stochtree estimator +``` diff --git a/vignettes/summary-plotting.qmd b/vignettes/summary-plotting.qmd new file mode 100644 index 000000000..66bad6779 --- /dev/null +++ b/vignettes/summary-plotting.qmd @@ -0,0 +1,56 @@ +--- +title: "Summary and Plotting" +--- + +Posterior summary utilities for `stochtree` models: credible intervals, +trace plots, partial dependence plots, and variable importance summaries. + +::: {.callout-note} +This vignette is under construction. 
Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/summary.ipynb` +::: + +## Setup + +```{python} +#| eval: false +import stochtree +import numpy as np +import matplotlib.pyplot as plt +``` + +## Fitting a Model + +```{python} +#| eval: false +# Fit a model to summarize +``` + +## Credible Intervals + +```{python} +#| eval: false +# Pointwise posterior credible intervals +``` + +## Trace Plots + +```{python} +#| eval: false +# MCMC trace plots for sigma^2 and other scalar parameters +``` + +## Partial Dependence Plots + +```{python} +#| eval: false +# Marginal effect of a single covariate +``` + +## Variable Importance + +```{python} +#| eval: false +# Split-based variable importance +``` diff --git a/vignettes/tree-inspection.qmd b/vignettes/tree-inspection.qmd new file mode 100644 index 000000000..cfa84450c --- /dev/null +++ b/vignettes/tree-inspection.qmd @@ -0,0 +1,114 @@ +--- +title: "Tree Inspection" +--- + +Examining the structure and parameters of individual trees within a fitted +`stochtree` ensemble — useful for interpretability and debugging. + +::: {.callout-note} +This vignette is under construction. 
Content will be ported from: + +- **Python**: `stochtree_repo/demo/notebooks/tree_inspection.ipynb` +- **R**: pkgdown article `TreeInspection` +::: + +## Setup + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +library(stochtree) +``` + +## Python + +```{python} +#| eval: false +import stochtree +import numpy as np +``` + +:::: + +## Fitting a Model + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Fit a model to inspect +``` + +## Python + +```{python} +#| eval: false +# Fit a model to inspect +``` + +:::: + +## Accessing the Forest Container + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Extract forest container from fitted model +``` + +## Python + +```{python} +#| eval: false +# Extract forest container from fitted model +``` + +:::: + +## Inspecting Tree Structure + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Navigate individual trees: split variables, thresholds, leaf values +``` + +## Python + +```{python} +#| eval: false +# Navigate individual trees: split variables, thresholds, leaf values +``` + +:::: + +## Summarizing Split Frequencies + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| eval: false +# Variable importance via split counts +``` + +## Python + +```{python} +#| eval: false +# Variable importance via split counts +``` + +:::: diff --git a/vignettes/vignettes.bib b/vignettes/vignettes.bib new file mode 100644 index 000000000..65a6f152d --- /dev/null +++ b/vignettes/vignettes.bib @@ -0,0 +1,127 @@ +@article{murray2021log, + title={Log-linear Bayesian additive regression trees for multinomial logistic and count regression models}, + author={Murray, Jared S}, + journal={Journal of the American Statistical Association}, + volume={116}, + number={534}, + pages={756--769}, + year={2021}, + publisher={Taylor \& Francis} +} + +@article{pratola2020heteroscedastic, + title={Heteroscedastic BART via multiplicative regression trees}, + 
author={Pratola, Matthew T and Chipman, Hugh A and George, Edward I and McCulloch, Robert E}, + journal={Journal of Computational and Graphical Statistics}, + volume={29}, + number={2}, + pages={405--417}, + year={2020}, + publisher={Taylor \& Francis} +} + +@article{murray2021log, + title={Log-linear Bayesian additive regression trees for multinomial logistic and count regression models}, + author={Murray, Jared S}, + journal={Journal of the American Statistical Association}, + volume={116}, + number={534}, + pages={756--769}, + year={2021}, + publisher={Taylor \& Francis} +} + +@article{hahn2020bayesian, + title={Bayesian regression tree models for causal inference: Regularization, confounding, and heterogeneous effects (with discussion)}, + author={Hahn, P Richard and Murray, Jared S and Carvalho, Carlos M}, + journal={Bayesian Analysis}, + volume={15}, + number={3}, + pages={965--1056}, + year={2020}, + publisher={International Society for Bayesian Analysis} +} + +@article{chipman2010bart, +author = {Hugh A. Chipman and Edward I. George and Robert E. 
McCulloch}, +title = {{BART: Bayesian additive regression trees}}, +volume = {4}, +journal = {The Annals of Applied Statistics}, +number = {1}, +publisher = {Institute of Mathematical Statistics}, +pages = {266 -- 298}, +keywords = {Bayesian backfitting, boosting, CART, ‎classification‎, ensemble, MCMC, Nonparametric regression, probit model, random basis, regularizatio, sum-of-trees model, Variable selection, weak learner}, +year = {2010}, +doi = {10.1214/09-AOAS285}, +URL = {https://doi.org/10.1214/09-AOAS285} +} + +@article{he2023stochastic, + title={Stochastic tree ensembles for regularized nonlinear regression}, + author={He, Jingyu and Hahn, P Richard}, + journal={Journal of the American Statistical Association}, + volume={118}, + number={541}, + pages={551--570}, + year={2023}, + publisher={Taylor \& Francis} +} + +@book{pearl2009causality, + title={Causality}, + author={Pearl, Judea}, + year={2009}, + publisher={Cambridge university press} +} + +@book{imbens2015causal, + title={Causal inference in statistics, social, and biomedical sciences}, + author={Imbens, Guido W and Rubin, Donald B}, + year={2015}, + publisher={Cambridge university press} +} + +@inproceedings{krantsevich2023stochastic, + title={Stochastic tree ensembles for estimating heterogeneous effects}, + author={Krantsevich, Nikolay and He, Jingyu and Hahn, P Richard}, + booktitle={International Conference on Artificial Intelligence and Statistics}, + pages={6120--6131}, + year={2023}, + organization={PMLR} +} + +@Article{gramacy2010categorical, + title = {Categorical Inputs, Sensitivity Analysis, Optimization and Importance Tempering with {tgp} Version 2, an {R} Package for Treed Gaussian Process Models}, + author = {Robert B. 
Gramacy and Matthew Taddy}, + journal = {Journal of Statistical Software}, + year = {2010}, + volume = {33}, + number = {6}, + pages = {1--48}, + url = {https://www.jstatsoft.org/v33/i06/}, + doi = {10.18637/jss.v033.i06}, +} + +@book{gramacy2020surrogates, + title = {Surrogates: {G}aussian Process Modeling, Design and \ + Optimization for the Applied Sciences}, + author = {Robert B. Gramacy}, + publisher = {Chapman Hall/CRC}, + address = {Boca Raton, Florida}, + note = {\url{http://bobby.gramacy.com/surrogates/}}, + year = {2020} +} + +@book{scholkopf2002learning, + title={Learning with kernels: support vector machines, regularization, optimization, and beyond}, + author={Sch{\"o}lkopf, Bernhard and Smola, Alexander J}, + year={2002}, + publisher={MIT press} +} + +@article{alam2025unified, + title={A Unified Bayesian Nonparametric Framework for Ordinal, Survival, and Density Regression Using the Complementary Log-Log Link}, + author={Alam, Entejar and Linero, Antonio R}, + journal={arXiv preprint arXiv:2502.00606}, + year={2025} +} From 6f66300c99d1cc89d7f8e57d7e42284d9c8f558b Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Tue, 17 Mar 2026 18:08:34 -0500 Subject: [PATCH 2/8] Migrating R vignettes to quarto multilingual vignettes --- vignettes/bcf.qmd | 610 ++++++++++++++++++++++++++-- vignettes/custom-sampling.qmd | 314 ++++++++++++--- vignettes/ensemble-kernel.qmd | 208 ++++++++-- vignettes/heteroskedastic.qmd | 684 ++++++++++++++++++++++++++++++-- vignettes/multi-chain.qmd | 646 ++++++++++++++++++++++++++++-- vignettes/ordinal-outcome.qmd | 384 ++++++++++++++++-- vignettes/prior-calibration.qmd | 107 +++-- vignettes/serialization.qmd | 301 ++++++++++++-- vignettes/summary-plotting.qmd | 337 ++++++++++++++-- vignettes/tree-inspection.qmd | 225 +++++++++-- 10 files changed, 3470 insertions(+), 346 deletions(-) diff --git a/vignettes/bcf.qmd b/vignettes/bcf.qmd index ba523733f..a707146c4 100644 --- a/vignettes/bcf.qmd +++ b/vignettes/bcf.qmd @@ -1,129 +1,655 @@ 
--- title: "BCF: Bayesian Causal Forests" +bibliography: vignettes.bib --- -Bayesian Causal Forests (BCF) for estimating heterogeneous treatment effects, -with separate tree ensembles for the prognostic function and treatment effect. - -::: {.callout-note} -This vignette is under construction. Content will be ported from: - -- **Python**: `stochtree_repo/demo/notebooks/causal_inference.ipynb` -- **R**: pkgdown article `CausalInference` -::: - -## Background - -BCF [@hahn2020bayesian] models the conditional average treatment effect (CATE) +This vignette demonstrates how to use the `bcf()` function for causal inference +(@hahn2020bayesian). BCF models the conditional average treatment effect (CATE) by fitting two separate tree ensembles: $$ Y_i = \mu(X_i) + \tau(X_i) Z_i + \epsilon_i, \quad \epsilon_i \sim \mathcal{N}(0, \sigma^2) $$ -where $\mu(\cdot)$ is a prognostic forest and $\tau(\cdot)$ is a treatment effect forest. -The estimated propensity score $\hat{\pi}(X_i)$ is included as a covariate in $\mu(\cdot)$ -to reduce confounding bias. +where $\mu(\cdot)$ is a prognostic forest and $\tau(\cdot)$ is a treatment effect +forest. The estimated propensity score $\hat{\pi}(X_i)$ is included as a covariate +in $\mu(\cdot)$ to reduce confounding bias. -## Setup +# Setup ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false library(stochtree) ``` ## Python ```{python} -#| eval: false -import stochtree -import numpy as np +# Python implementation coming soon +``` + +:::: + +We also define several simple functions that configure the data generating processes +used in this vignette. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +g <- function(x) { + ifelse(x[, 5] == 1, 2, ifelse(x[, 5] == 2, -1, -4)) +} +mu1 <- function(x) { + 1 + g(x) + x[, 1] * x[, 3] +} +mu2 <- function(x) { + 1 + g(x) + 6 * abs(x[, 3] - 1) +} +tau1 <- function(x) { + rep(3, nrow(x)) +} +tau2 <- function(x) { + 1 + 2 * x[, 2] * x[, 4] +} +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# Binary Treatment + +## Demo 1: Nonlinear Outcome Model, Heterogeneous Treatment Effect + +We consider the following data generating process from @hahn2020bayesian: + +\begin{equation*} +\begin{aligned} +y &= \mu(X) + \tau(X) Z + \epsilon\\ +\epsilon &\sim N\left(0,\sigma^2\right)\\ +\mu(X) &= 1 + g(X) + 6 \lvert X_3 - 1 \rvert\\ +\tau(X) &= 1 + 2 X_2 X_4\\ +g(X) &= \mathbb{I}(X_5=1) \times 2 - \mathbb{I}(X_5=2) \times 1 - \mathbb{I}(X_5=3) \times 4\\ +s_{\mu} &= \sqrt{\mathbb{V}(\mu(X))}\\ +\pi(X) &= 0.8 \phi\left(\frac{3\mu(X)}{s_{\mu}}\right) - \frac{X_1}{2} + \frac{2U+1}{20}\\ +X_1,X_2,X_3 &\sim N\left(0,1\right)\\ +X_4 &\sim \text{Bernoulli}(1/2)\\ +X_5 &\sim \text{Categorical}(1/3,1/3,1/3)\\ +U &\sim \text{Uniform}\left(0,1\right)\\ +Z &\sim \text{Bernoulli}\left(\pi(X)\right) +\end{aligned} +\end{equation*} + +### Simulation + +We draw from the DGP defined above. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +n <- 500 +snr <- 3 +x1 <- rnorm(n) +x2 <- rnorm(n) +x3 <- rnorm(n) +x4 <- as.numeric(rbinom(n, 1, 0.5)) +x5 <- as.numeric(sample(1:3, n, replace = TRUE)) +X <- cbind(x1, x2, x3, x4, x5) +p <- ncol(X) +mu_x <- mu1(X) +tau_x <- tau2(X) +pi_x <- 0.8 * pnorm((3 * mu_x / sd(mu_x)) - 0.5 * X[, 1]) + 0.05 + runif(n) / 10 +Z <- rbinom(n, 1, pi_x) +E_XZ <- mu_x + Z * tau_x +y <- E_XZ + rnorm(n, 0, 1) * (sd(E_XZ) / snr) +X <- as.data.frame(X) +X$x4 <- factor(X$x4, ordered = TRUE) +X$x5 <- factor(X$x5, ordered = TRUE) + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct * n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- X[test_inds, ] +X_train <- X[train_inds, ] +pi_test <- pi_x[test_inds] +pi_train <- pi_x[train_inds] +Z_test <- Z[test_inds] +Z_train <- Z[train_inds] +y_test <- y[test_inds] +y_train <- y[train_inds] +mu_test <- mu_x[test_inds] +mu_train <- mu_x[train_inds] +tau_test <- tau_x[test_inds] +tau_train <- tau_x[train_inds] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +### Sampling and Analysis + +#### Warmstart + +We first simulate from an ensemble model of $y \mid X$ using "warm-start" +initialization samples (@krantsevich2023stochastic). This is the default in +`stochtree`. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(keep_every = 5) +prognostic_forest_params <- list(sample_sigma2_leaf = F) +treatment_effect_forest_params <- list(sample_sigma2_leaf = F) +bcf_model_warmstart <- bcf( + X_train = X_train, + Z_train = Z_train, + y_train = y_train, + propensity_train = pi_train, + X_test = X_test, + Z_test = Z_test, + propensity_test = pi_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + prognostic_forest_params = prognostic_forest_params, + treatment_effect_forest_params = treatment_effect_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Inspect the samples initialized with an XBART warm-start. + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot( + rowMeans(bcf_model_warmstart$mu_hat_test), + mu_test, + xlab = "predicted", + ylab = "actual", + main = "Prognostic function" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +plot( + rowMeans(bcf_model_warmstart$tau_hat_test), + tau_test, + xlab = "predicted", + ylab = "actual", + main = "Treatment effect" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +sigma_observed <- var(y - E_XZ) +plot_bounds <- c( + min(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)), + max(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)) +) +plot( + bcf_model_warmstart$sigma2_global_samples, + ylim = plot_bounds, + ylab = "sigma^2", + xlab = "Sample", + main = "Global variance parameter" +) +abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Examine test set interval coverage. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +test_lb <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.025) +test_ub <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.975) +cover <- ((test_lb <= tau_x[test_inds]) & + (test_ub >= tau_x[test_inds])) +mean(cover) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +#### BART MCMC without Warmstart + +Next, we simulate from this ensemble model without any warm-start initialization. + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 0 +num_burnin <- 2000 +num_mcmc <- 100 +general_params <- list(keep_every = 5) +prognostic_forest_params <- list(sample_sigma2_leaf = F) +treatment_effect_forest_params <- list(sample_sigma2_leaf = F) +bcf_model_root <- bcf( + X_train = X_train, + Z_train = Z_train, + y_train = y_train, + propensity_train = pi_train, + X_test = X_test, + Z_test = Z_test, + propensity_test = pi_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + prognostic_forest_params = prognostic_forest_params, + treatment_effect_forest_params = treatment_effect_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Inspect the samples after burnin. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +plot( + rowMeans(bcf_model_root$mu_hat_test), + mu_test, + xlab = "predicted", + ylab = "actual", + main = "Prognostic function" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +plot( + rowMeans(bcf_model_root$tau_hat_test), + tau_test, + xlab = "predicted", + ylab = "actual", + main = "Treatment effect" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +sigma_observed <- var(y - E_XZ) +plot_bounds <- c( + min(c(bcf_model_root$sigma2_global_samples, sigma_observed)), + max(c(bcf_model_root$sigma2_global_samples, sigma_observed)) +) +plot( + bcf_model_root$sigma2_global_samples, + ylim = plot_bounds, + ylab = "sigma^2", + xlab = "Sample", + main = "Global variance parameter" +) +abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Examine test set interval coverage. + +::::{.panel-tabset group="language"} + +## R + +```{r} +test_lb <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.025) +test_ub <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.975) +cover <- ((test_lb <= tau_x[test_inds]) & + (test_ub >= tau_x[test_inds])) +mean(cover) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Demo 2: Linear Outcome Model, Heterogeneous Treatment Effect + +We consider the following data generating process from @hahn2020bayesian: + +\begin{equation*} +\begin{aligned} +y &= \mu(X) + \tau(X) Z + \epsilon\\ +\epsilon &\sim N\left(0,\sigma^2\right)\\ +\mu(X) &= 1 + g(X) + 6 X_1 X_3\\ +\tau(X) &= 1 + 2 X_2 X_4\\ +g(X) &= \mathbb{I}(X_5=1) \times 2 - \mathbb{I}(X_5=2) \times 1 - \mathbb{I}(X_5=3) \times 4\\ +s_{\mu} &= \sqrt{\mathbb{V}(\mu(X))}\\ +\pi(X) &= 0.8 \phi\left(\frac{3\mu(X)}{s_{\mu}}\right) - \frac{X_1}{2} + \frac{2U+1}{20}\\ +X_1,X_2,X_3 &\sim N\left(0,1\right)\\ +X_4 &\sim \text{Bernoulli}(1/2)\\ +X_5 &\sim \text{Categorical}(1/3,1/3,1/3)\\ +U &\sim 
\text{Uniform}\left(0,1\right)\\ +Z &\sim \text{Bernoulli}\left(\pi(X)\right) +\end{aligned} +\end{equation*} + +### Simulation + +We draw from the DGP defined above. + +::::{.panel-tabset group="language"} + +## R + +```{r} +n <- 500 +snr <- 3 +x1 <- rnorm(n) +x2 <- rnorm(n) +x3 <- rnorm(n) +x4 <- as.numeric(rbinom(n, 1, 0.5)) +x5 <- as.numeric(sample(1:3, n, replace = TRUE)) +X <- cbind(x1, x2, x3, x4, x5) +p <- ncol(X) +mu_x <- mu2(X) +tau_x <- tau2(X) +pi_x <- 0.8 * pnorm((3 * mu_x / sd(mu_x)) - 0.5 * X[, 1]) + 0.05 + runif(n) / 10 +Z <- rbinom(n, 1, pi_x) +E_XZ <- mu_x + Z * tau_x +y <- E_XZ + rnorm(n, 0, 1) * (sd(E_XZ) / snr) +X <- as.data.frame(X) +X$x4 <- factor(X$x4, ordered = TRUE) +X$x5 <- factor(X$x5, ordered = TRUE) + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct * n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- X[test_inds, ] +X_train <- X[train_inds, ] +pi_test <- pi_x[test_inds] +pi_train <- pi_x[train_inds] +Z_test <- Z[test_inds] +Z_train <- Z[train_inds] +y_test <- y[test_inds] +y_train <- y[train_inds] +mu_test <- mu_x[test_inds] +mu_train <- mu_x[train_inds] +tau_test <- tau_x[test_inds] +tau_train <- tau_x[train_inds] +``` + +## Python + +```{python} +# Python implementation coming soon ``` :::: -## Data Simulation +### Sampling and Analysis + +#### Warmstart ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Simulate observational data with heterogeneous treatment effects +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(keep_every = 5) +prognostic_forest_params <- list(sample_sigma2_leaf = F) +treatment_effect_forest_params <- list(sample_sigma2_leaf = F) +bcf_model_warmstart <- bcf( + X_train = X_train, + Z_train = Z_train, + y_train = y_train, + propensity_train = pi_train, + X_test = X_test, + Z_test = Z_test, + propensity_test = pi_test, + num_gfr = num_gfr, + 
num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + prognostic_forest_params = prognostic_forest_params, + treatment_effect_forest_params = treatment_effect_forest_params +) ``` ## Python ```{python} -#| eval: false -# Simulate observational data with heterogeneous treatment effects +# Python implementation coming soon ``` :::: -## Propensity Score Estimation +::::{.panel-tabset group="language"} + +## R + +```{r} +plot( + rowMeans(bcf_model_warmstart$mu_hat_test), + mu_test, + xlab = "predicted", + ylab = "actual", + main = "Prognostic function" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +plot( + rowMeans(bcf_model_warmstart$tau_hat_test), + tau_test, + xlab = "predicted", + ylab = "actual", + main = "Treatment effect" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +sigma_observed <- var(y - E_XZ) +plot_bounds <- c( + min(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)), + max(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)) +) +plot( + bcf_model_warmstart$sigma2_global_samples, + ylim = plot_bounds, + ylab = "sigma^2", + xlab = "Sample", + main = "Global variance parameter" +) +abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Examine test set interval coverage. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Estimate propensity scores +test_lb <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.025) +test_ub <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.975) +cover <- ((test_lb <= tau_x[test_inds]) & + (test_ub >= tau_x[test_inds])) +mean(cover) ``` ## Python ```{python} -#| eval: false -# Estimate propensity scores +# Python implementation coming soon ``` :::: -## Model Fitting +#### BART MCMC without Warmstart + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 0 +num_burnin <- 2000 +num_mcmc <- 100 +general_params <- list(keep_every = 5) +prognostic_forest_params <- list(sample_sigma2_leaf = F) +treatment_effect_forest_params <- list(sample_sigma2_leaf = F) +bcf_model_root <- bcf( + X_train = X_train, + Z_train = Z_train, + y_train = y_train, + propensity_train = pi_train, + X_test = X_test, + Z_test = Z_test, + propensity_test = pi_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + prognostic_forest_params = prognostic_forest_params, + treatment_effect_forest_params = treatment_effect_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Fit BCF model +plot( + rowMeans(bcf_model_root$mu_hat_test), + mu_test, + xlab = "predicted", + ylab = "actual", + main = "Prognostic function" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +plot( + rowMeans(bcf_model_root$tau_hat_test), + tau_test, + xlab = "predicted", + ylab = "actual", + main = "Treatment effect" +) +abline(0, 1, col = "red", lty = 3, lwd = 3) +sigma_observed <- var(y - E_XZ) +plot_bounds <- c( + min(c(bcf_model_root$sigma2_global_samples, sigma_observed)), + max(c(bcf_model_root$sigma2_global_samples, sigma_observed)) +) +plot( + bcf_model_root$sigma2_global_samples, + ylim = plot_bounds, + ylab = "sigma^2", + xlab = "Sample", + 
main = "Global variance parameter" +) +abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ``` ## Python ```{python} -#| eval: false -# Fit BCF model +# Python implementation coming soon ``` :::: -## Posterior Summaries +Examine test set interval coverage. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Summarize CATE posterior +test_lb <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.025) +test_ub <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.975) +cover <- ((test_lb <= tau_x[test_inds]) & + (test_ub >= tau_x[test_inds])) +mean(cover) ``` ## Python ```{python} -#| eval: false -# Summarize CATE posterior +# Python implementation coming soon ``` :::: -## References +# References diff --git a/vignettes/custom-sampling.qmd b/vignettes/custom-sampling.qmd index f518386f7..47f654387 100644 --- a/vignettes/custom-sampling.qmd +++ b/vignettes/custom-sampling.qmd @@ -1,133 +1,351 @@ --- title: "Custom Sampling Routine" +bibliography: vignettes.bib --- -Building a custom Gibbs sampler using the low-level `stochtree` primitives — -`Dataset`, `Residual`, `ForestSampler`, `GlobalVarianceModel`, and -`LeafVarianceModel` — to implement non-standard models and sampling schemes. +While the functions `bart()` and `bcf()` provide simple and performant interfaces for +supervised learning / causal inference, `stochtree` also offers access to many of the +"low-level" data structures that are typically implemented in C++. This low-level +interface is not designed for performance or even simplicity — rather the intent is +to provide a "prototype" interface to the C++ code that doesn't require modifying any +C++. 
+ +# Motivation + +To illustrate when such a prototype interface might be useful, consider the classic +BART algorithm: + +| **INPUT**: $y$, $X$, $\tau$, $\nu$, $\lambda$, $\alpha$, $\beta$ +| **OUTPUT**: $m$ samples of a decision forest with $k$ trees and global variance parameter $\sigma^2$ +| Initialize $\sigma^2$ via a default or a data-dependent calibration exercise +| Initialize "forest 0" with $k$ trees with a single root node, referring to tree $j$'s prediction vector as $f_{0,j}$ +| Compute residual as $r = y - \sum_{j=1}^k f_{0,j}$ +| **FOR** $i$ **IN** $\left\{1,\dots,m\right\}$: +| Initialize forest $i$ from forest $i-1$ +| **FOR** $j$ **IN** $\left\{1,\dots,k\right\}$: +| Add predictions for tree $j$ to residual: $r = r + f_{i,j}$ +| Sample tree $j$ of forest $i$ from $p\left(\mathcal{T}_{i,j} \mid r, \sigma^2\right)$ +| Update residual by removing tree $j$'s predictions: $r = r - f_{i,j}$ +| Sample $\sigma^2$ from $p\left(\sigma^2 \mid r\right)$ +| **RETURN** forests $\left\{1,\dots,m\right\}$ and $\sigma^2$ samples + +This algorithm is implemented in the `bart()` function, but the low-level interface +allows you to customize this loop — for example, to add random effects, modify the +variance model, or implement a novel sampling scheme. + +# Setup -::: {.callout-note} -This vignette is under construction. Content will be ported from: +::::{.panel-tabset group="language"} + +## R + +```{r} +library(stochtree) +``` -- **Python**: `stochtree_repo/demo/notebooks/prototype_interface.ipynb` -- **R**: pkgdown article `CustomSamplingRoutine` -::: +## Python + +```{python} +# Python implementation coming soon +``` -## Background +:::: -The high-level `BARTModel` and `BCFModel` classes handle data preparation, -prior calibration, and sampling internally. The low-level interface exposes -these building blocks so you can: +# Demo 1: Supervised Learning -- Customize which parameters are sampled and in what order -- Insert additional Gibbs steps (e.g. 
for auxiliary variables) -- Implement novel models that share `stochtree`'s tree ensemble samplers +## Simulation -## Setup +Simulate a simple partitioned linear model. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -library(stochtree) +# Generate the data +n <- 500 +p_X <- 10 +p_W <- 1 +X <- matrix(runif(n*p_X), ncol = p_X) +W <- matrix(runif(n*p_W), ncol = p_W) +f_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (-3*W[,1]) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (-1*W[,1]) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (1*W[,1]) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3*W[,1]) +) +y <- f_XW + rnorm(n, 0, 1) + +# Standardize outcome +y_bar <- mean(y) +y_std <- sd(y) +resid <- (y-y_bar)/y_std +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Sampling + +Set some parameters that inform the forest and variance parameter samplers. + +::::{.panel-tabset group="language"} + +## R + +```{r} +alpha <- 0.9 +beta <- 1.25 +min_samples_leaf <- 1 +max_depth <- 10 +num_trees <- 100 +cutpoint_grid_size <- 100 +global_variance_init <- 1. +current_sigma2 <- global_variance_init +tau_init <- 1/num_trees +leaf_prior_scale <- as.matrix(ifelse(p_W >= 1, diag(tau_init, p_W), diag(tau_init, 1))) +nu <- 4 +lambda <- 0.5 +a_leaf <- 2. +b_leaf <- 0.5 +leaf_regression <- T +feature_types <- as.integer(rep(0, p_X)) # 0 = numeric +var_weights <- rep(1/p_X, p_X) ``` ## Python ```{python} -#| eval: false -from stochtree import ( - Dataset, - Residual, - ForestSampler, - GlobalVarianceModel, - LeafVarianceModel, - Forest, - ForestContainer, +# Python implementation coming soon +``` + +:::: + +Initialize R-level access to the C++ classes needed to sample our model. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +# Data +if (leaf_regression) { + forest_dataset <- createForestDataset(X, W) + outcome_model_type <- 1 + leaf_dimension <- p_W +} else { + forest_dataset <- createForestDataset(X) + outcome_model_type <- 0 + leaf_dimension <- 1 +} +outcome <- createOutcome(resid) + +# Random number generator (std::mt19937) +rng <- createCppRNG() + +# Sampling data structures +forest_model_config <- createForestModelConfig( + feature_types = feature_types, num_trees = num_trees, num_features = p_X, + num_observations = n, variable_weights = var_weights, leaf_dimension = leaf_dimension, + alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, max_depth = max_depth, + leaf_model_type = outcome_model_type, leaf_model_scale = leaf_prior_scale, + cutpoint_grid_size = cutpoint_grid_size ) -import numpy as np +global_model_config <- createGlobalModelConfig(global_error_variance = global_variance_init) +forest_model <- createForestModel(forest_dataset, forest_model_config, global_model_config) + +# "Active forest" (which gets updated by the sample) and +# container of forest samples (which is written to when +# a sample is not discarded due to burn-in / thinning) +if (leaf_regression) { + forest_samples <- createForestSamples(num_trees, 1, F) + active_forest <- createForest(num_trees, 1, F) +} else { + forest_samples <- createForestSamples(num_trees, 1, T) + active_forest <- createForest(num_trees, 1, T) +} + +# Initialize the leaves of each tree in the forest +active_forest$prepare_for_sampler(forest_dataset, outcome, forest_model, outcome_model_type, mean(resid)) +active_forest$adjust_residual(forest_dataset, outcome, forest_model, ifelse(outcome_model_type==1, T, F), F) +``` + +## Python + +```{python} +# Python implementation coming soon ``` :::: -## Preparing Data Structures +Prepare to run the sampler. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Construct Dataset and Residual objects +num_warmstart <- 10 +num_mcmc <- 100 +num_samples <- num_warmstart + num_mcmc +global_var_samples <- c(global_variance_init, rep(0, num_samples)) +leaf_scale_samples <- c(tau_init, rep(0, num_samples)) ``` ## Python ```{python} -#| eval: false -# Construct Dataset and Residual objects +# Python implementation coming soon ``` :::: -## Initializing Forests +Run the grow-from-root sampler to "warm-start" BART (@he2023stochastic). ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Initialize Forest and ForestContainer +for (i in 1:num_warmstart) { + # Sample forest + forest_model$sample_one_iteration( + forest_dataset, outcome, forest_samples, active_forest, rng, + forest_model_config, global_model_config, keep_forest = T, gfr = T + ) + + # Sample global variance parameter + current_sigma2 <- sampleGlobalErrorVarianceOneIteration( + outcome, forest_dataset, rng, nu, lambda + ) + global_var_samples[i+1] <- current_sigma2 + global_model_config$update_global_error_variance(current_sigma2) + + # Sample leaf node variance parameter and update `leaf_prior_scale` + leaf_scale_samples[i+1] <- sampleLeafVarianceOneIteration( + active_forest, rng, a_leaf, b_leaf + ) + leaf_prior_scale[1,1] <- leaf_scale_samples[i+1] + forest_model_config$update_leaf_model_scale(leaf_prior_scale) +} ``` ## Python ```{python} -#| eval: false -# Initialize Forest and ForestContainer +# Python implementation coming soon ``` :::: -## Custom Gibbs Loop +Pick up from the last GFR forest (and associated global variance / leaf scale +parameters) with an MCMC sampler. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Implement a full Gibbs sampler loop +for (i in (num_warmstart+1):num_samples) { + # Sample forest + forest_model$sample_one_iteration( + forest_dataset, outcome, forest_samples, active_forest, rng, + forest_model_config, global_model_config, keep_forest = T, gfr = F + ) + + # Sample global variance parameter + current_sigma2 <- sampleGlobalErrorVarianceOneIteration( + outcome, forest_dataset, rng, nu, lambda + ) + global_var_samples[i+1] <- current_sigma2 + global_model_config$update_global_error_variance(current_sigma2) + + # Sample leaf node variance parameter and update `leaf_prior_scale` + leaf_scale_samples[i+1] <- sampleLeafVarianceOneIteration( + active_forest, rng, a_leaf, b_leaf + ) + leaf_prior_scale[1,1] <- leaf_scale_samples[i+1] + forest_model_config$update_leaf_model_scale(leaf_prior_scale) +} ``` ## Python ```{python} -#| eval: false -# Implement a full Gibbs sampler loop +# Python implementation coming soon ``` :::: -## Extracting Results +Predict and rescale samples. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Extract posterior samples from ForestContainer +# Forest predictions +preds <- forest_samples$predict(forest_dataset)*y_std + y_bar + +# Global error variance +sigma_samples <- sqrt(global_var_samples)*y_std ``` ## Python ```{python} -#| eval: false -# Extract posterior samples from ForestContainer +# Python implementation coming soon ``` :::: + +## Results + +Inspect the initial samples obtained via "grow-from-root" (@he2023stochastic). + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(sigma_samples[1:num_warmstart], ylab="sigma") +plot(rowMeans(preds[,1:num_warmstart]), y, pch=16, + cex=0.75, xlab = "pred", ylab = "actual") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Inspect the BART samples obtained after "warm-starting". 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(sigma_samples[(num_warmstart+1):num_samples], ylab="sigma") +plot(rowMeans(preds[,(num_warmstart+1):num_samples]), y, pch=16, + cex=0.75, xlab = "pred", ylab = "actual") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# References diff --git a/vignettes/ensemble-kernel.qmd b/vignettes/ensemble-kernel.qmd index 10491a937..4e087800e 100644 --- a/vignettes/ensemble-kernel.qmd +++ b/vignettes/ensemble-kernel.qmd @@ -1,62 +1,206 @@ --- title: "Ensemble Kernel" +bibliography: vignettes.bib --- -Using the `stochtree` tree ensemble as a kernel function — extracting the -posterior similarity matrix induced by leaf co-membership across trees and -MCMC samples. +A trained tree ensemble with strong out-of-sample performance admits a natural +motivation for the "distance" between two samples: shared leaf membership. +This vignette demonstrates how to extract a kernel matrix from a fitted `stochtree` +ensemble and use it for Gaussian process inference. -::: {.callout-note} -This vignette is under construction. Content will be ported from: +# Motivation -- **R**: pkgdown article `EnsembleKernel` -::: +We number the leaves in an ensemble from 1 to $s$ (that is, if tree 1 has 3 leaves, +it reserves the numbers 1 - 3, and in turn if tree 2 has 5 leaves, it reserves the +numbers 4 - 8 to label its leaves, and so on). 
For a dataset with $n$ observations, +we construct the matrix $W$ as follows: -## Background +| Initialize $W$ as a matrix of all zeroes with $n$ rows and as many columns as leaves in the ensemble +| Let `s` = 0 +| **FOR** $j$ **IN** $\left\{1,\dots,m\right\}$: +| Let `num_leaves` be the number of leaves in tree $j$ +| **FOR** $i$ **IN** $\left\{1,\dots,n\right\}$: +| Let `k` be the leaf to which tree $j$ maps observation $i$ +| Set element $W_{i,k+s} = 1$ +| Let `s` = `s + num_leaves` -Each tree in a BART ensemble partitions the covariate space into leaf regions. -Two observations $i$ and $j$ that fall in the same leaf of tree $t$ at sample $s$ -are "similar" according to that tree. Averaging over trees and posterior samples -gives a data-driven kernel: +This sparse matrix $W$ is a matrix representation of the basis predictions of an +ensemble (i.e. integrating out the leaf parameters and just analyzing the leaf +indices). For an ensemble with $m$ trees, we can determine the proportion of trees +that map each observation to the same leaf by computing $W W^T / m$. This can form +the basis for a kernel function used in a Gaussian process regression, as we +demonstrate below. -$$ -K(x_i, x_j) = \frac{1}{Sm} \sum_{s=1}^S \sum_{t=1}^m \mathbf{1}[\ell_t^{(s)}(x_i) = \ell_t^{(s)}(x_j)] -$$ +# Setup -This kernel can be used for clustering, Gaussian process inference, or as a -similarity measure for downstream tasks. +```{r} +library(stochtree) +library(tgp) +library(MASS) +library(Matrix) +library(mvtnorm) +``` + +# Demo 1: Univariate Supervised Learning + +We begin with a simulated example from the `tgp` package (@gramacy2010categorical). +This data generating process (DGP) is non-stationary with a single numeric +covariate. We define a training set and test set and evaluate various approaches +to modeling the out of sample outcome data. + +## Traditional Gaussian Process -## Setup +We can use the `tgp` package to model this data with a classical Gaussian Process. 
```{r} -#| eval: false -library(stochtree) +#| results: hide +# Generate the data +X_train <- seq(0,20,length=100) +X_test <- seq(0,20,length=99) +y_train <- (sin(pi*X_train/5) + 0.2*cos(4*pi*X_train/5)) * (X_train <= 9.6) +lin_train <- X_train>9.6; +y_train[lin_train] <- -1 + X_train[lin_train]/10 +y_train <- y_train + rnorm(length(y_train), sd=0.1) +y_test <- (sin(pi*X_test/5) + 0.2*cos(4*pi*X_test/5)) * (X_test <= 9.6) +lin_test <- X_test>9.6; +y_test[lin_test] <- -1 + X_test[lin_test]/10 + +# Fit the GP +model_gp <- bgp(X=X_train, Z=y_train, XX=X_test) +plot(model_gp$ZZ.mean, y_test, xlab = "predicted", ylab = "actual", main = "Gaussian process") +abline(0,1,lwd=2.5,lty=3,col="red") +``` + +Assess the RMSE + +```{r} +sqrt(mean((model_gp$ZZ.mean - y_test)^2)) +``` + +## BART-based Gaussian Process + +```{r} +# Run BART on the data +num_trees <- 200 +sigma_leaf <- 1/num_trees +X_train <- as.data.frame(X_train) +X_test <- as.data.frame(X_test) +colnames(X_train) <- colnames(X_test) <- "x1" +mean_forest_params <- list(num_trees=num_trees, sigma2_leaf_init=sigma_leaf) +bart_model <- bart(X_train=X_train, y_train=y_train, X_test=X_test, mean_forest_params = mean_forest_params) + +# Extract kernels needed for kriging +leaf_mat_train <- computeForestLeafIndices(bart_model, X_train, forest_type = "mean", + forest_inds = bart_model$model_params$num_samples - 1) +leaf_mat_test <- computeForestLeafIndices(bart_model, X_test, forest_type = "mean", + forest_inds = bart_model$model_params$num_samples - 1) +W_train <- sparseMatrix(i=rep(1:length(y_train),num_trees), j=leaf_mat_train + 1, x=1) +W_test <- sparseMatrix(i=rep(1:length(y_test),num_trees), j=leaf_mat_test + 1, x=1) +Sigma_11 <- tcrossprod(W_test) / num_trees +Sigma_12 <- tcrossprod(W_test, W_train) / num_trees +Sigma_22 <- tcrossprod(W_train) / num_trees +Sigma_22_inv <- ginv(as.matrix(Sigma_22)) +Sigma_21 <- t(Sigma_12) + +# Compute mean and covariance for the test set posterior +mu_tilde <- Sigma_12 %*% 
Sigma_22_inv %*% y_train +Sigma_tilde <- as.matrix((sigma_leaf)*(Sigma_11 - Sigma_12 %*% Sigma_22_inv %*% Sigma_21)) + +# Sample from f(X_test) | X_test, X_train, f(X_train) +gp_samples <- mvtnorm::rmvnorm(1000, mean = mu_tilde, sigma = Sigma_tilde) + +# Compute posterior mean predictions for f(X_test) +yhat_mean_test <- colMeans(gp_samples) +plot(yhat_mean_test, y_test, xlab = "predicted", ylab = "actual", main = "BART Gaussian process") +abline(0,1,lwd=2.5,lty=3,col="red") +``` + +Assess the RMSE + +```{r} +sqrt(mean((yhat_mean_test - y_test)^2)) ``` -## Fitting a Model +# Demo 2: Multivariate Supervised Learning + +We proceed to the simulated "Friedman" dataset, as implemented in `tgp`. + +## Traditional Gaussian Process + +We can use the `tgp` package to model this data with a classical Gaussian Process. ```{r} -#| eval: false -# Fit a BART model +#| results: hide +# Generate the data, add many "noise variables" +n <- 100 +friedman.df <- friedman.1.data(n=n) +train_inds <- sort(sample(1:n, floor(0.8*n), replace = FALSE)) +test_inds <- (1:n)[!((1:n) %in% train_inds)] +X <- as.matrix(friedman.df)[,1:10] +X <- cbind(X, matrix(runif(n*10), ncol = 10)) +y <- as.matrix(friedman.df)[,12] + rnorm(n,0,1)*(sd(as.matrix(friedman.df)[,11])/2) +X_train <- X[train_inds,] +X_test <- X[test_inds,] +y_train <- y[train_inds] +y_test <- y[test_inds] + +# Fit the GP +model_gp <- bgp(X=X_train, Z=y_train, XX=X_test) +plot(model_gp$ZZ.mean, y_test, xlab = "predicted", ylab = "actual", main = "Gaussian process") +abline(0,1,lwd=2.5,lty=3,col="red") ``` -## Extracting the Kernel Matrix +Assess the RMSE ```{r} -#| eval: false -# Compute leaf co-membership kernel +sqrt(mean((model_gp$ZZ.mean - y_test)^2)) ``` -## Visualizing the Kernel +## BART-based Gaussian Process ```{r} -#| eval: false -# Heatmap of kernel matrix +# Run BART on the data +num_trees <- 200 +sigma_leaf <- 1/num_trees +X_train <- as.data.frame(X_train) +X_test <- as.data.frame(X_test) +mean_forest_params <- 
list(num_trees=num_trees, sigma2_leaf_init=sigma_leaf) +bart_model <- bart(X_train=X_train, y_train=y_train, X_test=X_test, mean_forest_params = mean_forest_params) + +# Extract kernels needed for kriging +leaf_mat_train <- computeForestLeafIndices(bart_model, X_train, forest_type = "mean", + forest_inds = bart_model$model_params$num_samples - 1) +leaf_mat_test <- computeForestLeafIndices(bart_model, X_test, forest_type = "mean", + forest_inds = bart_model$model_params$num_samples - 1) +W_train <- sparseMatrix(i=rep(1:length(y_train),num_trees), j=leaf_mat_train + 1, x=1) +W_test <- sparseMatrix(i=rep(1:length(y_test),num_trees), j=leaf_mat_test + 1, x=1) +Sigma_11 <- tcrossprod(W_test) / num_trees +Sigma_12 <- tcrossprod(W_test, W_train) / num_trees +Sigma_22 <- tcrossprod(W_train) / num_trees +Sigma_22_inv <- ginv(as.matrix(Sigma_22)) +Sigma_21 <- t(Sigma_12) + +# Compute mean and covariance for the test set posterior +mu_tilde <- Sigma_12 %*% Sigma_22_inv %*% y_train +Sigma_tilde <- as.matrix((sigma_leaf)*(Sigma_11 - Sigma_12 %*% Sigma_22_inv %*% Sigma_21)) + +# Sample from f(X_test) | X_test, X_train, f(X_train) +gp_samples <- mvtnorm::rmvnorm(1000, mean = mu_tilde, sigma = Sigma_tilde) + +# Compute posterior mean predictions for f(X_test) +yhat_mean_test <- colMeans(gp_samples) +plot(yhat_mean_test, y_test, xlab = "predicted", ylab = "actual", main = "BART Gaussian process") +abline(0,1,lwd=2.5,lty=3,col="red") ``` -## Downstream Applications +Assess the RMSE ```{r} -#| eval: false -# Clustering or GP prediction with the ensemble kernel +sqrt(mean((yhat_mean_test - y_test)^2)) ``` + +While the use case of a BART kernel for classical kriging is perhaps unclear without +more empirical investigation, the kernel approach can be very beneficial for causal +inference applications. 
+ +# References diff --git a/vignettes/heteroskedastic.qmd b/vignettes/heteroskedastic.qmd index e1f48d9b0..fd9105741 100644 --- a/vignettes/heteroskedastic.qmd +++ b/vignettes/heteroskedastic.qmd @@ -1,107 +1,717 @@ --- title: "Heteroskedastic BART" +bibliography: vignettes.bib --- -BART with a forest-modeled error variance, where the residual variance -$\sigma^2(X_i)$ is itself a function of covariates learned via a separate -tree ensemble. +This vignette demonstrates how to use the `bart()` function for Bayesian supervised +learning (@chipman2010bart) and causal inference (@hahn2020bayesian), with an +additional "variance forest," for modeling conditional variance (see @murray2021log). -::: {.callout-note} -This vignette is under construction. Content will be ported from: +# Setup -- **Python**: `stochtree_repo/demo/notebooks/heteroskedastic_supervised_learning.ipynb` -- **R**: pkgdown article `Heteroskedasticity` -::: +::::{.panel-tabset group="language"} -## Background +## R -Standard BART assumes homoskedastic errors $\epsilon_i \sim \mathcal{N}(0, \sigma^2)$. -When error variance varies with covariates, modeling $\sigma^2(X_i)$ jointly with -the mean function improves inference. The model is: +```{r} +library(stochtree) +``` -$$ -Y_i = \mu(X_i) + \epsilon_i, \quad \epsilon_i \sim \mathcal{N}(0, \sigma^2(X_i)) -$$ +## Python -where both $\mu(\cdot)$ and $\log \sigma^2(\cdot)$ are modeled with BART ensembles. +```{python} +# Python implementation coming soon +``` -## Setup +:::: + +# Section 1: Supervised Learning + +## Demo 1: Variance-Only Simulation (Simple DGP) + +Here, we generate data with a constant (zero) mean and a relatively simple +covariate-modified variance function. 
+ +\begin{equation*} +\begin{aligned} +y &= 0 + \sigma(X) \epsilon\\ +\sigma^2(X) &= \begin{cases} +0.25 & X_1 \geq 0 \text{ and } X_1 < 0.25\\ +1 & X_1 \geq 0.25 \text{ and } X_1 < 0.5\\ +4 & X_1 \geq 0.5 \text{ and } X_1 < 0.75\\ +9 & X_1 \geq 0.75 \text{ and } X_1 < 1\\ +\end{cases}\\ +X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ +\epsilon &\sim \mathcal{N}\left(0,1\right) +\end{aligned} +\end{equation*} + +### Simulation ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -library(stochtree) +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- 0 +s_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3) +) +y <- f_XW + rnorm(n, 0, 1)*s_XW + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +s_x_test <- s_XW[test_inds] +s_x_train <- s_XW[train_inds] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +### Sampling and Analysis + +#### Warmstart + +We first sample the $\sigma^2(X)$ ensemble using "warm-start" initialization +(@he2023stochastic). This is the default in `stochtree`. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +num_trees <- 20 +num_samples <- num_gfr + num_burnin + num_mcmc +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0) +variance_forest_params <- list(num_trees = num_trees) +bart_model_warmstart <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Inspect the MCMC samples. + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +#### MCMC + +We now sample the $\sigma^2(X)$ ensemble using MCMC with root initialization (as in +@chipman2010bart). + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 0 +num_burnin <- 1000 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0) +variance_forest_params <- list(num_trees = num_trees) +bart_model_mcmc <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Inspect the MCMC samples. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) ``` ## Python ```{python} -#| eval: false -import stochtree -import numpy as np +# Python implementation coming soon ``` :::: -## Data Simulation +## Demo 2: Variance-Only Simulation (Complex DGP) + +Here, we generate data with a constant (zero) mean and a more complex +covariate-modified variance function. + +\begin{equation*} +\begin{aligned} +y &= 0 + \sigma(X) \epsilon\\ +\sigma^2(X) &= \begin{cases} +0.25X_3^2 & X_1 \geq 0 \text{ and } X_1 < 0.25\\ +1X_3^2 & X_1 \geq 0.25 \text{ and } X_1 < 0.5\\ +4X_3^2 & X_1 \geq 0.5 \text{ and } X_1 < 0.75\\ +9X_3^2 & X_1 \geq 0.75 \text{ and } X_1 < 1\\ +\end{cases}\\ +X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ +\epsilon &\sim \mathcal{N}\left(0,1\right) +\end{aligned} +\end{equation*} + +### Simulation ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Simulate data with non-constant variance +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- 0 +s_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5*X[,3]) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1*X[,3]) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2*X[,3]) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) +) +y <- f_XW + rnorm(n, 0, 1)*s_XW + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +s_x_test <- s_XW[test_inds] +s_x_train <- s_XW[train_inds] ``` ## Python ```{python} -#| eval: false -# Simulate data with non-constant variance +# Python implementation coming soon ``` :::: -## Model 
Fitting +### Sampling and Analysis + +#### Warmstart ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Fit heteroskedastic BART +num_trees <- 20 +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = num_trees, alpha = 0.95, + beta = 1.25, min_samples_leaf = 1) +bart_model_warmstart <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) ``` ## Python ```{python} -#| eval: false -# Fit heteroskedastic BART +# Python implementation coming soon ``` :::: -## Posterior Summaries +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +#### MCMC ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Mean and variance posterior summaries +num_gfr <- 0 +num_burnin <- 1000 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = num_trees, alpha = 0.95, + beta = 1.25, min_samples_leaf = 1) +bart_model_mcmc <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) ``` ## Python ```{python} 
-#| eval: false -# Mean and variance posterior summaries +# Python implementation coming soon ``` :::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Demo 3: Mean and Variance Simulation (Simple DGP) + +Here, we generate data with (relatively simple) covariate-modified mean and variance +functions. + +\begin{equation*} +\begin{aligned} +y &= f(X) + \sigma(X) \epsilon\\ +f(X) &= \begin{cases} +-6 & X_2 \geq 0 \text{ and } X_2 < 0.25\\ +-2 & X_2 \geq 0.25 \text{ and } X_2 < 0.5\\ +2 & X_2 \geq 0.5 \text{ and } X_2 < 0.75\\ +6 & X_2 \geq 0.75 \text{ and } X_2 < 1\\ +\end{cases}\\ +\sigma^2(X) &= \begin{cases} +0.25 & X_1 \geq 0 \text{ and } X_1 < 0.25\\ +1 & X_1 \geq 0.25 \text{ and } X_1 < 0.5\\ +4 & X_1 \geq 0.5 \text{ and } X_1 < 0.75\\ +9 & X_1 \geq 0.75 \text{ and } X_1 < 1\\ +\end{cases}\\ +X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ +\epsilon &\sim \mathcal{N}\left(0,1\right) +\end{aligned} +\end{equation*} + +### Simulation + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- ( + ((0 <= X[,2]) & (0.25 > X[,2])) * (-6) + + ((0.25 <= X[,2]) & (0.5 > X[,2])) * (-2) + + ((0.5 <= X[,2]) & (0.75 > X[,2])) * (2) + + ((0.75 <= X[,2]) & (1 > X[,2])) * (6) +) +s_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3) +) +y <- f_XW + rnorm(n, 0, 1)*s_XW + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test 
<- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +f_x_test <- f_XW[test_inds] +s_x_test <- s_XW[test_inds] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +### Sampling and Analysis + +#### Warmstart + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = 50, alpha = 0.95, + beta = 1.25, min_samples_leaf = 5) +bart_model_warmstart <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_warmstart$y_hat_test), y_test, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") +abline(0,1,col="red",lty=2,lwd=2.5) +plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +#### MCMC + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 0 +num_burnin <- 1000 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = 50, alpha = 0.95, + beta = 1.25, min_samples_leaf = 5) +bart_model_mcmc <- stochtree::bart( + X_train = 
X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_mcmc$y_hat_test), y_test, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") +abline(0,1,col="red",lty=2,lwd=2.5) +plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Demo 4: Mean and Variance Simulation (Complex DGP) + +Here, we generate data with more complex covariate-modified mean and variance +functions. + +\begin{equation*} +\begin{aligned} +y &= f(X) + \sigma(X) \epsilon\\ +f(X) &= \begin{cases} +-6X_4 & X_2 \geq 0 \text{ and } X_2 < 0.25\\ +-2X_4 & X_2 \geq 0.25 \text{ and } X_2 < 0.5\\ +2X_4 & X_2 \geq 0.5 \text{ and } X_2 < 0.75\\ +6X_4 & X_2 \geq 0.75 \text{ and } X_2 < 1\\ +\end{cases}\\ +\sigma^2(X) &= \begin{cases} +0.25X_3^2 & X_1 \geq 0 \text{ and } X_1 < 0.25\\ +1X_3^2 & X_1 \geq 0.25 \text{ and } X_1 < 0.5\\ +4X_3^2 & X_1 \geq 0.5 \text{ and } X_1 < 0.75\\ +9X_3^2 & X_1 \geq 0.75 \text{ and } X_1 < 1\\ +\end{cases}\\ +X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ +\epsilon &\sim \mathcal{N}\left(0,1\right) +\end{aligned} +\end{equation*} + +### Simulation + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- ( + ((0 <= X[,2]) & (0.25 > X[,2])) * (-6*X[,4]) + + ((0.25 <= X[,2]) & (0.5 > X[,2])) * (-2*X[,4]) + + ((0.5 <= X[,2]) & (0.75 > X[,2])) * (2*X[,4]) + + ((0.75 <= X[,2]) & (1 > X[,2])) * (6*X[,4]) +) +s_XW <- ( + 
((0 <= X[,1]) & (0.25 > X[,1])) * (0.5*X[,3]) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1*X[,3]) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2*X[,3]) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) +) +y <- f_XW + rnorm(n, 0, 1)*s_XW + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +f_x_test <- f_XW[test_inds] +s_x_test <- s_XW[test_inds] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +### Sampling and Analysis + +#### Warmstart + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = 50, alpha = 0.95, + beta = 1.25, min_samples_leaf = 5) +bart_model_warmstart <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_warmstart$y_hat_test), y_test, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") +abline(0,1,col="red",lty=2,lwd=2.5) +plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming 
soon +``` + +:::: + +#### MCMC + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 0 +num_burnin <- 1000 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = 50, alpha = 0.95, + beta = 1.25, min_samples_leaf = 5) +bart_model_mcmc <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(rowMeans(bart_model_mcmc$y_hat_test), y_test, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") +abline(0,1,col="red",lty=2,lwd=2.5) +plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, + pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# References diff --git a/vignettes/multi-chain.qmd b/vignettes/multi-chain.qmd index fd42d45b3..348ab630c 100644 --- a/vignettes/multi-chain.qmd +++ b/vignettes/multi-chain.qmd @@ -1,110 +1,500 @@ --- title: "Multi-Chain Inference" +bibliography: vignettes.bib --- -Running multiple independent MCMC chains with `stochtree` and combining -their results for improved mixing diagnostics and more robust posterior summaries. +# Motivation -::: {.callout-note} -This vignette is under construction. Content will be ported from: +Mixing of an MCMC sampler is a perennial concern for complex Bayesian models. BART +and BCF are no exception. 
One common way to address such concerns is to run multiple +independent "chains" of an MCMC sampler, so that if each chain gets stuck in a +different region of the posterior, their combined samples attain better coverage of +the full posterior. -- **Python**: `stochtree_repo/demo/notebooks/multi_chain.ipynb` -- **R**: pkgdown article `MultiChain` -::: +This idea works with the classic "root-initialized" MCMC sampler of @chipman2010bart, +but a key insight of @he2023stochastic and @krantsevich2023stochastic is that the GFR +algorithm may be used to warm-start initialize multiple chains of the BART / BCF MCMC +sampler. -## Background +Operationally, the above two approaches have the same implementation (setting +`num_gfr > 0` if warm-start initialization is desired), so this vignette will +demonstrate how to run a multi-chain sampler sequentially or in parallel. -Running a single MCMC chain can be fragile. Multiple independent chains allow: - -- **Convergence diagnostics**: $\hat{R}$ (potential scale reduction factor) detects - non-convergence across chains. -- **Parallelism**: Chains can run concurrently on multiple cores. -- **Ensemble pooling**: Posterior samples from all chains can be pooled for final inference. - -## Setup +# Setup ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false +#| warning: false +#| message: false library(stochtree) +library(ggplot2) +library(coda) +library(bayesplot) +library(foreach) library(doParallel) ``` ## Python ```{python} -#| eval: false -import stochtree -import numpy as np -from multiprocessing import Pool +# Python implementation coming soon ``` :::: +# Demo 1: Supervised Learning + ## Data Simulation +Simulate a simple partitioned linear model. 
+ ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Simulate data +# Generate the data +set.seed(1111) +n <- 500 +p_x <- 10 +p_w <- 1 +snr <- 3 +X <- matrix(runif(n * p_x), ncol = p_x) +leaf_basis <- matrix(runif(n * p_w), ncol = p_w) +f_XW <- (((0 <= X[, 1]) & (0.25 > X[, 1])) * + (-7.5 * leaf_basis[, 1]) + + ((0.25 <= X[, 1]) & (0.5 > X[, 1])) * (-2.5 * leaf_basis[, 1]) + + ((0.5 <= X[, 1]) & (0.75 > X[, 1])) * (2.5 * leaf_basis[, 1]) + + ((0.75 <= X[, 1]) & (1 > X[, 1])) * (7.5 * leaf_basis[, 1])) +noise_sd <- sd(f_XW) / snr +y <- f_XW + rnorm(n, 0, 1) * noise_sd + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct * n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- X[test_inds, ] +X_train <- X[train_inds, ] +leaf_basis_test <- leaf_basis[test_inds, ] +leaf_basis_train <- leaf_basis[train_inds, ] +y_test <- y[test_inds] +y_train <- y[train_inds] ``` ## Python ```{python} -#| eval: false -# Simulate data +# Python implementation coming soon ``` :::: -## Running Multiple Chains +## Sampling Multiple Chains Sequentially from Scratch + +The simplest way to sample multiple chains of a stochtree model is to do so +"sequentially," that is, after chain 1 is sampled, chain 2 is sampled from a +different starting state, and similarly for each of the requested chains. This is +supported internally in both the `bart()` and `bcf()` functions, with the +`num_chains` parameter in the `general_params` list. + +Define some high-level parameters, including number of chains to run and number of +samples per chain. Here we run 4 independent chains with 2000 MCMC iterations, each +of which is burned in for 1000 iterations. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Run chains in parallel with doParallel +num_chains <- 4 +num_gfr <- 0 +num_burnin <- 1000 +num_mcmc <- 2000 ``` ## Python ```{python} -#| eval: false -# Run chains sequentially or with multiprocessing +# Python implementation coming soon ``` :::: -## Convergence Diagnostics +Run the sampler. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Compute R-hat, trace plots +bart_model <- stochtree::bart( + X_train = X_train, + leaf_basis_train = leaf_basis_train, + y_train = y_train, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = list(num_chains = num_chains) +) ``` ## Python ```{python} -#| eval: false -# Compute R-hat, trace plots +# Python implementation coming soon +``` + +:::: + +Now we have a `bartmodel` object with `num_chains * num_mcmc` samples stored +internally. These samples are arranged sequentially, with the first `num_mcmc` +samples corresponding to chain 1, the next `num_mcmc` samples to chain 2, etc. + +Since each chain is a set of samples of the same model, we can analyze the samples +collectively, for example, by looking at out-of-sample predictions. + +::::{.panel-tabset group="language"} + +## R + +```{r} +y_hat_test <- predict( + bart_model, + X = X_test, + leaf_basis = leaf_basis_test, + type = "mean", + terms = "y_hat" +) +plot(y_hat_test, y_test, xlab = "Predicted", ylab = "Actual") +abline(0, 1, col = "red", lty = 3, lwd = 3) +``` + +## Python + +```{python} +# Python implementation coming soon ``` :::: -## Combining Chain Results +Now, suppose we want to analyze each of the chains separately to assess mixing / +convergence. We can construct an `mcmc.list` in the `coda` package to perform various +diagnostics. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +sigma2_coda_list <- coda::as.mcmc.list(lapply( + 1:num_chains, + function(chain_idx) { + offset <- (chain_idx - 1) * num_mcmc + inds_start <- offset + 1 + inds_end <- offset + num_mcmc + coda::mcmc(bart_model$sigma2_global_samples[inds_start:inds_end]) + } +)) +traceplot(sigma2_coda_list, ylab = expression(sigma^2)) +abline(h = noise_sd^2, col = "black", lty = 3, lwd = 3) +acf <- autocorr.diag(sigma2_coda_list) +ess <- effectiveSize(sigma2_coda_list) +rhat <- gelman.diag(sigma2_coda_list, autoburnin = F) +cat(paste0( + "Average autocorrelation across chains:\n", + paste0(paste0(rownames(acf), ": ", round(acf, 3)), collapse = ", "), + "\nTotal effective sample size across chains: ", + paste0(round(ess, 1), collapse = ", "), + "\n'R-hat' potential scale reduction factor of Gelman and Rubin (1992)): ", + paste0(round(rhat$psrf[, 1], 3), collapse = ", ") +)) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +We can convert this to an array to be consumed by the `bayesplot` package. + +::::{.panel-tabset group="language"} + +## R + +```{r} +coda_array <- as.array(sigma2_coda_list) +dim(coda_array) <- c(nrow(coda_array), ncol(coda_array), 1) +dimnames(coda_array) <- list( + Iteration = paste0("iter", 1:num_mcmc), + Chain = paste0("chain", 1:num_chains), + Parameter = "sigma2_global" +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +From here, we can visualize the posterior of $\sigma^2$ for each chain, comparing +to the true simulated value. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +#| warning: false +#| message: false +bayesplot::mcmc_hist_by_chain( + coda_array, + pars = "sigma2_global" +) + + ggplot2::labs( + title = "Global error scale posterior by chain", + x = expression(sigma^2) + ) + + ggplot2::theme( + plot.title = ggplot2::element_text(hjust = 0.5) + ) + + ggplot2::geom_vline( + xintercept = noise_sd^2, + color = "black", + linetype = "dashed", + size = 1 + ) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Sampling Multiple Chains Sequentially from XBART Forests + +In the example above, each chain was initialized from "root". If we sample a model +using a small number of 'grow-from-root' iterations, we can use these forests to +initialize MCMC chains. + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_chains <- 4 +num_gfr <- 5 +num_burnin <- 1000 +num_mcmc <- 2000 +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Run the initial GFR sampler. + +::::{.panel-tabset group="language"} + +## R + +```{r} +xbart_model <- stochtree::bart( + X_train = X_train, + leaf_basis_train = leaf_basis_train, + y_train = y_train, + num_gfr = num_gfr, + num_burnin = 0, + num_mcmc = 0 +) +xbart_model_string <- stochtree::saveBARTModelToJsonString(xbart_model) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Run the multi-chain BART sampler, with each chain initialized from a different GFR +forest. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_model <- stochtree::bart( + X_train = X_train, + leaf_basis_train = leaf_basis_train, + y_train = y_train, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = list(num_chains = num_chains), + previous_model_json = xbart_model_string, + previous_model_warmstart_sample_num = num_gfr +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +y_hat_test <- predict( + bart_model, + X = X_test, + leaf_basis = leaf_basis_test, + type = "mean", + terms = "y_hat" +) +plot(y_hat_test, y_test, xlab = "Predicted", ylab = "Actual") +abline(0, 1, col = "red", lty = 3, lwd = 3) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +sigma2_coda_list <- coda::as.mcmc.list(lapply( + 1:num_chains, + function(chain_idx) { + offset <- (chain_idx - 1) * num_mcmc + inds_start <- offset + 1 + inds_end <- offset + num_mcmc + coda::mcmc(bart_model$sigma2_global_samples[inds_start:inds_end]) + } +)) +traceplot(sigma2_coda_list, ylab = expression(sigma^2)) +abline(h = noise_sd^2, col = "black", lty = 3, lwd = 3) +acf <- autocorr.diag(sigma2_coda_list) +ess <- effectiveSize(sigma2_coda_list) +rhat <- gelman.diag(sigma2_coda_list, autoburnin = F) +cat(paste0( + "Average autocorrelation across chains:\n", + paste0(paste0(rownames(acf), ": ", round(acf, 3)), collapse = ", "), + "\nTotal effective sample size across chains: ", + paste0(round(ess, 1), collapse = ", "), + "\n'R-hat' potential scale reduction factor of Gelman and Rubin (1992)): ", + paste0(round(rhat$psrf[, 1], 3), collapse = ", ") +)) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +coda_array <- as.array(sigma2_coda_list) +dim(coda_array) <- c(nrow(coda_array), 
ncol(coda_array), 1) +dimnames(coda_array) <- list( + Iteration = paste0("iter", 1:num_mcmc), + Chain = paste0("chain", 1:num_chains), + Parameter = "sigma2_global" +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +#| warning: false +#| message: false +bayesplot::mcmc_hist_by_chain( + coda_array, + pars = "sigma2_global" +) + + ggplot2::labs( + title = "Global error scale posterior by chain", + x = expression(sigma^2) + ) + + ggplot2::theme( + plot.title = ggplot2::element_text(hjust = 0.5) + ) + + ggplot2::geom_vline( + xintercept = noise_sd^2, + color = "black", + linetype = "dashed", + size = 1 + ) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Sampling Multiple Chains in Parallel + +While the above examples used sequential multi-chain sampling internally in `bart()` +and `bcf()`, it is also possible to run chains in parallel via `doParallel`. While +`bartmodel` or `bcfmodel` objects contain external pointers to C++ data structures +which are not reachable by other processes, we can serialize `stochtree` models to +JSON for cross-process communication. After `num_chains` models have been run in +parallel and their JSON representations have been collated in the primary R session, +we can combine these into a single `bartmodel` or `bcfmodel` object via +`createBARTModelFromCombinedJsonString()` or `createBCFModelFromCombinedJsonString()`. + +In order to run multiple parallel stochtree chains, a parallel backend must be +registered in your R environment. Note that we do not evaluate the cluster setup +code below in order to interact nicely with CRAN / GitHub Actions environments. 
::::{.panel-tabset group="language"} @@ -112,14 +502,194 @@ from multiprocessing import Pool ```{r} #| eval: false -# Pool posterior samples across chains +ncores <- parallel::detectCores() +cl <- makeCluster(ncores) +registerDoParallel(cl) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_chains <- 4 +num_gfr <- 0 +num_burnin <- 100 +num_mcmc <- 100 ``` ## Python ```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_model_outputs <- foreach(i = 1:num_chains) %dopar% + { + random_seed <- i + general_params <- list(sample_sigma2_global = T, random_seed = random_seed) + mean_forest_params <- list(sample_sigma2_leaf = F) + bart_model <- stochtree::bart( + X_train = X_train, + leaf_basis_train = leaf_basis_train, + y_train = y_train, + X_test = X_test, + leaf_basis_test = leaf_basis_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + mean_forest_params = mean_forest_params + ) + bart_model_string <- stochtree::saveBARTModelToJsonString(bart_model) + y_hat_test <- bart_model$y_hat_test + list(model = bart_model_string, yhat = y_hat_test) + } +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Close the parallel cluster (not evaluated here). + +::::{.panel-tabset group="language"} + +## R + +```{r} #| eval: false -# Pool posterior samples across chains +stopCluster(cl) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Combine the forests from each BART model into a single forest. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_model_strings <- list() +bart_model_yhats <- matrix(NA, nrow = length(y_test), ncol = num_chains) +for (i in 1:length(bart_model_outputs)) { + bart_model_strings[[i]] <- bart_model_outputs[[i]]$model + bart_model_yhats[, i] <- rowMeans(bart_model_outputs[[i]]$yhat) +} +combined_bart <- createBARTModelFromCombinedJsonString(bart_model_strings) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +yhat_combined <- predict(combined_bart, X_test, leaf_basis_test)$y_hat +``` + +## Python + +```{python} +# Python implementation coming soon ``` :::: + +Compare average predictions from each chain to the original predictions and to +the true $y$ values. + +::::{.panel-tabset group="language"} + +## R + +```{r} +par(mfrow = c(1, 2)) +for (i in 1:num_chains) { + offset <- (i - 1) * num_mcmc + inds_start <- offset + 1 + inds_end <- offset + num_mcmc + plot( + rowMeans(yhat_combined[, inds_start:inds_end]), + bart_model_yhats[, i], + xlab = "deserialized", + ylab = "original", + main = paste0("Chain ", i, "\nPredictions") + ) + abline(0, 1, col = "red", lty = 3, lwd = 3) +} +par(mfrow = c(1, 1)) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +par(mfrow = c(1, 2)) +for (i in 1:num_chains) { + offset <- (i - 1) * num_mcmc + inds_start <- offset + 1 + inds_end <- offset + num_mcmc + plot( + rowMeans(yhat_combined[, inds_start:inds_end]), + y_test, + xlab = "predicted", + ylab = "actual", + main = paste0("Chain ", i, "\nPredictions") + ) + abline(0, 1, col = "red", lty = 3, lwd = 3) +} +par(mfrow = c(1, 1)) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# References diff --git a/vignettes/ordinal-outcome.qmd b/vignettes/ordinal-outcome.qmd index 1a95b8438..c6bd9d32b 100644 --- 
a/vignettes/ordinal-outcome.qmd +++ b/vignettes/ordinal-outcome.qmd @@ -1,53 +1,389 @@ --- title: "Ordinal Outcome Modeling" +bibliography: vignettes.bib --- -BART for ordinal (ordered categorical) responses, using a latent variable -formulation with learned cutpoints. +This vignette demonstrates how to use BART to model ordinal outcomes with a +complementary log-log (cloglog) link function (@alam2025unified). -::: {.callout-note} -This vignette is under construction. Content will be ported from: +## Introduction to Ordinal BART with CLogLog Link -- **Python**: `stochtree_repo/demo/notebooks/ordinal_outcome.ipynb` -::: - -## Background - -For an ordinal response $Y_i \in \{1, 2, \ldots, K\}$, the model introduces a -latent continuous variable $Z_i$ and cutpoints $\gamma_1 < \gamma_2 < \cdots < \gamma_{K-1}$: +Ordinal data refers to outcomes that have a natural ordering but undefined distances +between categories. Examples include survey responses (strongly disagree, disagree, +neutral, agree, strongly agree), severity ratings (mild, moderate, severe), or +educational levels (elementary, high school, college, graduate). +The complementary log-log (cloglog) model uses the cumulative link function +$$ +\text{cloglog}(p) = \log(-\log(1-p)) $$ -Y_i = k \iff \gamma_{k-1} < Z_i \leq \gamma_k +to express cumulative category probabilities as a function of covariates $$ +\text{cloglog}(P(Y \leq k \mid X = x)) = \log(-\log(1-P(Y \leq k \mid X = x))) = \gamma_k + \lambda(x) +$$ + +This link function is asymmetric and particularly appropriate when the probability of +being in higher categories changes rapidly at certain thresholds, making it different +from the symmetric probit or logit links commonly used in ordinal regression. -where $Z_i = \mu(X_i) + \epsilon_i$ with $\epsilon_i \sim \mathcal{N}(0,1)$ and -$\mu(\cdot)$ is a BART ensemble. +In `stochtree`, we let $\lambda(x)$ be represented by a stochastic tree ensemble. 
## Setup +::::{.panel-tabset group="language"} + +## R + +```{r} +library(stochtree) +``` + +## Python + ```{python} -#| eval: false -import stochtree -import numpy as np +# Python implementation coming soon ``` +:::: + ## Data Simulation +We simulate a dataset with an ordinal outcome with three categories, +$y_i \in \left\{1,2,3\right\}$ whose probabilities depend on covariates, $X$. + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Set seed +random_seed <- 2026 +set.seed(random_seed) + +# Sample size and number of predictors +n <- 2000 +p <- 5 + +# Design matrix and true lambda function +X <- matrix(rnorm(n * p), n, p) +beta <- rep(1 / sqrt(p), p) +true_lambda_function <- X %*% beta + +# Set cutpoints for ordinal categories (3 categories: 1, 2, 3) +n_categories <- 3 +gamma_true <- c(-2, 1) +ordinal_cutpoints <- log(cumsum(exp(gamma_true))) + +# True ordinal class probabilities +true_probs <- matrix(0, nrow = n, ncol = n_categories) +for (j in 1:n_categories) { + if (j == 1) { + true_probs[, j] <- 1 - exp(-exp(gamma_true[j] + true_lambda_function)) + } else if (j == n_categories) { + true_probs[, j] <- 1 - rowSums(true_probs[, 1:(j - 1), drop = FALSE]) + } else { + true_probs[, j] <- exp(-exp(gamma_true[j - 1] + true_lambda_function)) * + (1 - exp(-exp(gamma_true[j] + true_lambda_function))) + } +} + +# Generate ordinal outcomes +y <- sapply(1:nrow(X), function(i) { + sample(1:n_categories, 1, prob = true_probs[i, ]) +}) +cat("Outcome distribution:", table(y), "\n") + +# Train test split +train_idx <- sample(1:n, size = floor(0.8 * n)) +test_idx <- setdiff(1:n, train_idx) +X_train <- X[train_idx, ] +y_train <- y[train_idx] +X_test <- X[test_idx, ] +y_test <- y[test_idx] +``` + +## Python + ```{python} -#| eval: false -# Simulate ordinal outcome data +# Python implementation coming soon ``` +:::: + ## Model Fitting +We specify the cloglog link function for modeling an ordinal outcome by setting +`outcome_model=OutcomeModel(outcome="ordinal", 
link="cloglog")` in the +`general_params` argument list. Since ordinal outcomes are incompatible with the +Gaussian global error variance model, we also set `sample_sigma2_global=FALSE`. + +We also override the default `num_trees` for the mean forest (200) in favor of +greater regularization for the ordinal model and set `sample_sigma2_leaf=FALSE`. + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Sample the cloglog ordinal BART model +bart_model <- bart( + X_train = X_train, + y_train = y_train, + X_test = X_test, + num_gfr = 0, + num_burnin = 1000, + num_mcmc = 1000, + general_params = list( + cutpoint_grid_size = 100, + sample_sigma2_global = FALSE, + keep_every = 1, + num_chains = 1, + verbose = FALSE, + random_seed = random_seed, + outcome_model = OutcomeModel(outcome = 'ordinal', link = 'cloglog') + ), + mean_forest_params = list(num_trees = 50, sample_sigma2_leaf = FALSE) +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Prediction + +As with any other BART model in `stochtree`, we can use the `predict` function on +our ordinal model. Specifying `scale = "linear"` and `terms = "y_hat"` will simply +return predictions from the estimated $\lambda(x)$ function, but users can estimate +class probabilities via `scale = "probability"`, which by default returns an array of +dimension (`num_observations`, `num_categories`, `num_samples`). Specifying +`type = "mean"` collapses the output to a `num_observations` x `num_categories` +matrix with the average posterior class probability for each observation. Users can +also specify `type = "class"` for the maximum a posteriori (MAP) class label estimate +for each draw of each observation. + +Below we compute the posterior class probabilities for the train and test sets. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +est_probs_train <- predict( + bart_model, + X = X_train, + scale = "probability", + terms = "y_hat" +) +est_probs_test <- predict( + bart_model, + X = X_test, + scale = "probability", + terms = "y_hat" +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Model Results and Interpretation + +Since one of the "cutpoints" is fixed for identifiability, we plot the posterior +distributions of the other two cutpoints and compare them to their true simulated +values (blue dotted lines). + +The cutpoint samples are accessed via `extractParameter(bart_model, "cloglog_cutpoints")` +(shape: `(n_categories - 1, num_samples)`) and are shifted by the per-sample mean of +the training predictions to account for the non-identifiable intercept. + +::::{.panel-tabset group="language"} + +## R + +```{r} +y_hat_train_post <- predict( + bart_model, + X = X_train, + scale = "linear", + terms = "y_hat", + type = "posterior" +) +cutpoint_samples <- extractParameter(bart_model, "cloglog_cutpoints") +gamma1 <- cutpoint_samples[1, ] + colMeans(y_hat_train_post) +hist( + gamma1, + main = "Posterior Distribution of Cutpoint 1", + xlab = "Cutpoint 1", + freq = FALSE +) +abline(v = gamma_true[1], col = 'blue', lty = 3, lwd = 3) +gamma2 <- cutpoint_samples[2, ] + colMeans(y_hat_train_post) +hist( + gamma2, + main = "Posterior Distribution of Cutpoint 2", + xlab = "Cutpoint 2", + freq = FALSE +) +abline(v = gamma_true[2], col = 'blue', lty = 3, lwd = 3) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +We can compare the true value of the latent "utility function" $\lambda(x)$ to the +(mean-shifted) BART forest predictions. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +# Train set predicted versus actual +y_hat_train <- predict( + bart_model, + X = X_train, + scale = "linear", + terms = "y_hat", + type = "mean" +) +lambda_pred_train <- y_hat_train - mean(y_hat_train) +plot( + lambda_pred_train, + true_lambda_function[train_idx], + main = "Train Set: Predicted vs Actual", + xlab = "Predicted", + ylab = "Actual" +) +abline(a = 0, b = 1, col = 'blue', lwd = 2) +cor_train <- cor(true_lambda_function[train_idx], lambda_pred_train) +text( + min(lambda_pred_train), + max(true_lambda_function[train_idx]), + paste('Correlation:', round(cor_train, 3)), + adj = 0, + col = 'red' +) + +# Test set predicted versus actual +y_hat_test <- predict( + bart_model, + X = X_test, + scale = "linear", + terms = "y_hat", + type = "mean" +) +lambda_pred_test <- y_hat_test - mean(y_hat_test) +plot( + lambda_pred_test, + true_lambda_function[test_idx], + main = "Test Set: Predicted vs Actual", + xlab = "Predicted", + ylab = "Actual" +) +abline(a = 0, b = 1, col = 'blue', lwd = 2) +cor_test <- cor(true_lambda_function[test_idx], lambda_pred_test) +text( + min(lambda_pred_test), + max(true_lambda_function[test_idx]), + paste('Correlation:', round(cor_test, 3)), + adj = 0, + col = 'red' +) +``` + +## Python + ```{python} -#| eval: false -# Fit ordinal BART +# Python implementation coming soon ``` -## Posterior Summaries +:::: + +Finally, we compare the estimated class probabilities with their true simulated values +for each class on the training set. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +for (j in 1:n_categories) { + mean_probs <- rowMeans(est_probs_train[, j, ]) + plot( + true_probs[train_idx, j], + mean_probs, + main = paste("Training Set: True vs Estimated Probability, Class", j), + xlab = "True Class Probability", + ylab = "Estimated Class Probability" + ) + abline(a = 0, b = 1, col = 'blue', lwd = 2) + cor_train_prob <- cor(true_probs[train_idx, j], mean_probs) + text( + min(true_probs[train_idx, j]), + max(mean_probs), + paste('Correlation:', round(cor_train_prob, 3)), + adj = 0, + col = 'red' + ) +} +``` + +## Python ```{python} -#| eval: false -# Category probabilities, cumulative probabilities +# Python implementation coming soon ``` + +:::: + +And the same comparison on the test set. + +::::{.panel-tabset group="language"} + +## R + +```{r} +for (j in 1:n_categories) { + mean_probs <- rowMeans(est_probs_test[, j, ]) + plot( + true_probs[test_idx, j], + mean_probs, + main = paste("Test Set: True vs Estimated Probability, Class", j), + xlab = "True Class Probability", + ylab = "Estimated Class Probability" + ) + abline(a = 0, b = 1, col = 'blue', lwd = 2) + cor_test_prob <- cor(true_probs[test_idx, j], mean_probs) + text( + min(true_probs[test_idx, j]), + max(mean_probs), + paste('Correlation:', round(cor_test_prob, 3)), + adj = 0, + col = 'red' + ) +} +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# References diff --git a/vignettes/prior-calibration.qmd b/vignettes/prior-calibration.qmd index 91695c369..267200afa 100644 --- a/vignettes/prior-calibration.qmd +++ b/vignettes/prior-calibration.qmd @@ -1,62 +1,103 @@ --- title: "Prior Calibration" +bibliography: vignettes.bib --- -Choosing and calibrating the BART tree priors — including the branching process -prior on tree structure and the leaf parameter prior — to match prior beliefs -about the scale of the outcome. 
+This vignette demonstrates prior calibration approaches for the parametric components +of stochastic tree ensembles (@chipman2010bart). -::: {.callout-note} -This vignette is under construction. Content will be ported from: +# Background -- **R**: pkgdown article `PriorCalibration` -::: +The "classic" BART model of @chipman2010bart -## Background +\begin{equation*} +\begin{aligned} +y &= f(X) + \epsilon\\ +f(X) &\sim \text{BART}\left(\alpha, \beta\right)\\ +\epsilon &\sim \mathcal{N}\left(0,\sigma^2\right)\\ +\sigma^2 &\sim \text{IG}\left(a,b\right) +\end{aligned} +\end{equation*} -BART uses two types of priors: +is semiparametric, with a nonparametric tree ensemble $f(X)$ and a homoskedastic error +variance parameter $\sigma^2$. Note that in @chipman2010bart, $a$ and $b$ are +parameterized with $a = \frac{\nu}{2}$ and $b = \frac{\nu\lambda}{2}$. -1. **Tree structure prior**: Controls tree depth via $P(\text{split at depth } d) = \alpha (1 + d)^{-\beta}$. - Defaults: $\alpha = 0.95$, $\beta = 2$. +# Setting Priors on Variance Parameters in `stochtree` -2. **Leaf prior**: $\mu_{ij} \sim \mathcal{N}(0, \sigma_\mu^2 / m)$ where $m$ is the number - of trees. The scale $\sigma_\mu$ is typically calibrated so that $m \cdot \sigma_\mu$ - spans the observed outcome range. +By default, `stochtree` employs a Jeffreys' prior for $\sigma^2$ +\begin{equation*} +\begin{aligned} +\sigma^2 &\propto \frac{1}{\sigma^2} +\end{aligned} +\end{equation*} +which corresponds to an improper prior with $a = 0$ and $b = 0$. -3. **Error variance prior**: $\sigma^2 \sim \text{Inv-}\chi^2(\nu, \lambda)$ with $\nu$ and - $\lambda$ calibrated from a preliminary linear model fit. +We provide convenience functions for users wishing to set the $\sigma^2$ prior as in +@chipman2010bart. In this case, $\nu$ is set by default to 3 and $\lambda$ is +calibrated as follows: -## Setup +1. An "overestimate," $\hat{\sigma}^2$, of $\sigma^2$ is obtained via simple linear + regression of $y$ on $X$ +2. 
$\lambda$ is chosen to ensure that $p(\sigma^2 < \hat{\sigma}^2) = q$ for some value + $q$, typically set to a default value of 0.9. + +This is done in `stochtree` via the `calibrateInverseGammaErrorVariance` function. ```{r} -#| eval: false +# Load library library(stochtree) -``` - -## Default Priors -```{r} -#| eval: false -# Examine default prior settings +# Generate data +n <- 500 +p <- 5 +X <- matrix(runif(n*p), ncol = p) +f_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (-7.5) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (-2.5) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2.5) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (7.5) +) +noise_sd <- 1 +y <- f_XW + rnorm(n, 0, noise_sd) + +# Test/train split +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- X[test_inds,] +X_train <- X[train_inds,] +y_test <- y[test_inds] +y_train <- y[train_inds] + +# Calibrate the scale parameter for the variance term as in Chipman et al (2010) +nu <- 3 +lambda <- calibrateInverseGammaErrorVariance(y_train, X_train, nu = nu) ``` -## Calibrating the Leaf Scale +Now we run a BART model with this variance parameterization ```{r} -#| eval: false -# Automatic calibration of sigma_mu based on outcome range +general_params <- list(sigma2_global_shape = nu/2, sigma2_global_scale = (nu*lambda)/2) +bart_model <- bart(X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = 0, num_burnin = 1000, num_mcmc = 100, + general_params = general_params) ``` -## Calibrating the Error Variance Prior +Inspect the out-of-sample predictions of the model ```{r} -#| eval: false -# Calibrate nu and lambda from a linear model +plot(rowMeans(bart_model$y_hat_test), y_test, xlab = "predicted", ylab = "actual") +abline(0,1,col="red",lty=3,lwd=3) ``` -## Sensitivity Analysis +Inspect the posterior samples of $\sigma^2$ ```{r} -#| eval: false -# Compare posteriors under different prior 
choices +plot(bart_model$sigma2_global_samples, ylab = "sigma^2", xlab = "iteration") +abline(h = noise_sd^2, col = "red", lty = 3, lwd = 3) ``` + +# References diff --git a/vignettes/serialization.qmd b/vignettes/serialization.qmd index 6226390f0..5ab4f416d 100644 --- a/vignettes/serialization.qmd +++ b/vignettes/serialization.qmd @@ -1,114 +1,349 @@ --- title: "Model Serialization" +bibliography: vignettes.bib --- -Saving fitted `stochtree` models to disk and reloading them for prediction -or further analysis — without re-running the sampler. +This vignette demonstrates how to serialize ensemble models to JSON files and +deserialize back to an R or Python session, where the forests and other parameters +can be used for prediction and further analysis. -::: {.callout-note} -This vignette is under construction. Content will be ported from: +We also define several simple helper functions used in the data generating processes +below. -- **Python**: `stochtree_repo/demo/notebooks/serialization.ipynb` -- **R**: pkgdown article `ModelSerialization` -::: +::::{.panel-tabset group="language"} + +## R + +```{r} +library(stochtree) +g <- function(x) {ifelse(x[,5]==1,2,ifelse(x[,5]==2,-1,-4))} +mu1 <- function(x) {1+g(x)+x[,1]*x[,3]} +mu2 <- function(x) {1+g(x)+6*abs(x[,3]-1)} +tau1 <- function(x) {rep(3,nrow(x))} +tau2 <- function(x) {1+2*x[,2]*x[,4]} +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# Demo 1: Bayesian Causal Forest (BCF) + +BCF models are initially sampled and constructed using the `bcf()` function. +Here we show how to save and reload models from JSON files on disk. + +## Model Building -## Setup +Draw from a modified version of the data generating process defined in +@hahn2020bayesian. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -library(stochtree) +# Generate synthetic data +n <- 1000 +snr <- 2 +x1 <- rnorm(n) +x2 <- rnorm(n) +x3 <- rnorm(n) +x4 <- as.numeric(rbinom(n,1,0.5)) +x5 <- as.numeric(sample(1:3,n,replace=TRUE)) +X <- cbind(x1,x2,x3,x4,x5) +p <- ncol(X) +mu_x <- mu1(X) +tau_x <- tau2(X) +pi_x <- 0.8*pnorm((3*mu_x/sd(mu_x)) - 0.5*X[,1]) + 0.05 + runif(n)/10 +Z <- rbinom(n,1,pi_x) +E_XZ <- mu_x + Z*tau_x +rfx_group_ids <- rep(c(1,2), n %/% 2) +rfx_coefs <- matrix(c(-1, -1, 1, 1), nrow=2, byrow=TRUE) +rfx_basis <- cbind(1, runif(n, -1, 1)) +rfx_term <- rowSums(rfx_coefs[rfx_group_ids,] * rfx_basis) +y <- E_XZ + rfx_term + rnorm(n, 0, 1)*(sd(E_XZ)/snr) +X <- as.data.frame(X) +X$x4 <- factor(X$x4, ordered = TRUE) +X$x5 <- factor(X$x5, ordered = TRUE) + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- X[test_inds,] +X_train <- X[train_inds,] +pi_test <- pi_x[test_inds] +pi_train <- pi_x[train_inds] +Z_test <- Z[test_inds] +Z_train <- Z[train_inds] +y_test <- y[test_inds] +y_train <- y[train_inds] +mu_test <- mu_x[test_inds] +mu_train <- mu_x[train_inds] +tau_test <- tau_x[test_inds] +tau_train <- tau_x[train_inds] +rfx_group_ids_test <- rfx_group_ids[test_inds] +rfx_group_ids_train <- rfx_group_ids[train_inds] +rfx_basis_test <- rfx_basis[test_inds,] +rfx_basis_train <- rfx_basis[train_inds,] +rfx_term_test <- rfx_term[test_inds] +rfx_term_train <- rfx_term[train_inds] ``` ## Python ```{python} -#| eval: false -import stochtree -import json +# Python implementation coming soon ``` :::: -## Fitting a Model +Sample a BCF model. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Fit a BART model to serialize +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +prognostic_forest_params <- list(sample_sigma2_leaf = F) +treatment_effect_forest_params <- list(sample_sigma2_leaf = F) +bcf_model <- bcf( + X_train = X_train, Z_train = Z_train, y_train = y_train, propensity_train = pi_train, + rfx_group_ids_train = rfx_group_ids_train, rfx_basis_train = rfx_basis_train, + X_test = X_test, Z_test = Z_test, propensity_test = pi_test, + rfx_group_ids_test = rfx_group_ids_test, rfx_basis_test = rfx_basis_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + prognostic_forest_params = prognostic_forest_params, + treatment_effect_forest_params = treatment_effect_forest_params +) ``` ## Python ```{python} -#| eval: false -# Fit a BART model to serialize +# Python implementation coming soon ``` :::: -## Saving to Disk +## Serialization + +Save the BCF model to disk. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Serialize model to JSON / binary +saveBCFModelToJsonFile(bcf_model, "bcf.json") ``` ## Python ```{python} -#| eval: false -# Serialize model to JSON / binary +# Python implementation coming soon ``` :::: -## Loading from Disk +## Deserialization + +Reload the BCF model from disk. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Load serialized model +bcf_model_reload <- createBCFModelFromJsonFile("bcf.json") ``` ## Python ```{python} -#| eval: false -# Load serialized model +# Python implementation coming soon ``` :::: -## Prediction After Loading +Check that the predictions align with those of the original model. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Predict from reloaded model +bcf_preds_reload <- predict(bcf_model_reload, X_train, Z_train, pi_train, rfx_group_ids_train, rfx_basis_train) +plot(rowMeans(bcf_model$mu_hat_train), rowMeans(bcf_preds_reload$mu_hat), + xlab = "Original", ylab = "Deserialized", main = "Prognostic forest") +abline(0,1,col="red",lwd=3,lty=3) +plot(rowMeans(bcf_model$tau_hat_train), rowMeans(bcf_preds_reload$tau_hat), + xlab = "Original", ylab = "Deserialized", main = "Treatment forest") +abline(0,1,col="red",lwd=3,lty=3) +plot(rowMeans(bcf_model$y_hat_train), rowMeans(bcf_preds_reload$y_hat), + xlab = "Original", ylab = "Deserialized", main = "Overall outcome") +abline(0,1,col="red",lwd=3,lty=3) ``` ## Python ```{python} -#| eval: false -# Predict from reloaded model +# Python implementation coming soon ``` :::: + +# Demo 2: BART + +BART models are initially sampled and constructed using the `bart()` function. +Here we show how to save and reload models from JSON files on disk. + +## Model Building + +Draw from a relatively straightforward heteroskedastic supervised learning DGP. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- 0 +s_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5*X[,3]) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1*X[,3]) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2*X[,3]) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) +) +y <- f_XW + rnorm(n, 0, 1)*s_XW + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +s_x_test <- s_XW[test_inds] +s_x_train <- s_XW[train_inds] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Sample a BART model. + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 100, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = 50, alpha = 0.95, + beta = 1.25, min_samples_leaf = 1) +bart_model <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Serialization + +Save the BART model to disk. + +::::{.panel-tabset group="language"} + +## R + +```{r} +saveBARTModelToJsonFile(bart_model, "bart.json") +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Deserialization + +Reload the BART model from disk. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_model_reload <- createBARTModelFromJsonFile("bart.json") +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Check that the predictions align with those of the original model. + +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_preds_reload <- predict(bart_model_reload, X_train) +plot(rowMeans(bart_model$y_hat_train), rowMeans(bart_preds_reload$y_hat), + xlab = "Original", ylab = "Deserialized", main = "Conditional Mean Estimates") +abline(0,1,col="red",lwd=3,lty=3) +plot(rowMeans(bart_model$sigma2_x_hat_train), rowMeans(bart_preds_reload$variance_forest_predictions), + xlab = "Original", ylab = "Deserialized", main = "Conditional Variance Estimates") +abline(0,1,col="red",lwd=3,lty=3) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# References diff --git a/vignettes/summary-plotting.qmd b/vignettes/summary-plotting.qmd index 66bad6779..6a0de91a6 100644 --- a/vignettes/summary-plotting.qmd +++ b/vignettes/summary-plotting.qmd @@ -1,56 +1,341 @@ --- title: "Summary and Plotting" +bibliography: vignettes.bib --- -Posterior summary utilities for `stochtree` models: credible intervals, -trace plots, partial dependence plots, and variable importance summaries. +This vignette demonstrates the summary and plotting utilities available for +`stochtree` models. -::: {.callout-note} -This vignette is under construction. Content will be ported from: +# Setup -- **Python**: `stochtree_repo/demo/notebooks/summary.ipynb` -::: +::::{.panel-tabset group="language"} -## Setup +## R + +```{r} +library(stochtree) +random_seed = 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +# Supervised Learning + +We begin with the supervised learning use case served by the `bart()` function. + +Below we simulate a simple regression dataset. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +n <- 1000 +p_x <- 10 +p_w <- 1 +X <- matrix(runif(n * p_x), ncol = p_x) +W <- matrix(runif(n * p_w), ncol = p_w) +f_XW <- (((0 <= X[, 10]) & (0.25 > X[, 10])) * + (-7.5 * W[, 1]) + + ((0.25 <= X[, 10]) & (0.5 > X[, 10])) * (-2.5 * W[, 1]) + + ((0.5 <= X[, 10]) & (0.75 > X[, 10])) * (2.5 * W[, 1]) + + ((0.75 <= X[, 10]) & (1 > X[, 10])) * (7.5 * W[, 1])) +noise_sd <- 1 +y <- f_XW + rnorm(n, 0, 1) * noise_sd +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Now we fit a simple BART model to the data. + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 1000 +general_params <- list(num_chains = 3) +bart_model <- stochtree::bart( + X_train = X, + y_train = y, + leaf_basis_train = W, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +We obtain a high level summary of the BART model by running `print()`. + +::::{.panel-tabset group="language"} + +## R + +```{r} +print(bart_model) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +For a more detailed summary (including the information above), we use the `summary()` +function. + +::::{.panel-tabset group="language"} + +## R + +```{r} +summary(bart_model) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +We can use the `plot()` function to produce a traceplot of model terms like the global +error scale $\sigma^2$ or (if $\sigma^2$ is not sampled) the first observation of +cached train set predictions. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(bart_model) +``` + +## Python ```{python} -#| eval: false -import stochtree -import numpy as np -import matplotlib.pyplot as plt +# Python implementation coming soon ``` -## Fitting a Model +:::: + +For finer-grained control over which parameters to plot, we can also use the +`extractParameter()` function to pull the posterior distribution of any valid model +term (e.g., global error scale $\sigma^2$, leaf scale $\sigma^2_{\ell}$, in-sample +mean function predictions `y_hat_train`) and then plot any subset or transformation +of these values. + +::::{.panel-tabset group="language"} + +## R + +```{r} +y_hat_train_samples <- extractParameter(bart_model, "y_hat_train") +obs_index <- 1 +plot( + y_hat_train_samples[obs_index, ], + type = "l", + main = paste0("In-Sample Predictions Traceplot, Observation ", obs_index), + xlab = "Index", + ylab = "Parameter Values" +) +``` + +## Python ```{python} -#| eval: false -# Fit a model to summarize +# Python implementation coming soon +``` + +:::: + +# Causal Inference + +We now run the same demo for the causal inference use case served by the `bcf()` +function. + +Below we simulate a simple dataset for a causal inference problem with binary +treatment and continuous outcome. + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Generate covariates and treatment +n <- 1000 +p_X = 5 +X = matrix(runif(n * p_X), ncol = p_X) +pi_X = 0.25 + 0.5 * X[, 1] +Z = rbinom(n, 1, pi_X) + +# Define the outcome mean functions (prognostic and treatment effects) +mu_X = pi_X * 5 + 2 * X[, 3] +tau_X = X[, 2] * 2 - 1 + +# Generate outcome +epsilon = rnorm(n, 0, 1) +y = mu_X + tau_X * Z + epsilon ``` -## Credible Intervals +## Python ```{python} -#| eval: false -# Pointwise posterior credible intervals +# Python implementation coming soon +``` + +:::: + +Now we fit a simple BCF model to the data. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 1000 +general_params <- list(num_chains = 3) +bcf_model <- stochtree::bcf( + X_train = X, + y_train = y, + Z_train = Z, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params +) ``` -## Trace Plots +## Python ```{python} -#| eval: false -# MCMC trace plots for sigma^2 and other scalar parameters +# Python implementation coming soon +``` + +:::: + +We obtain a high level summary of the BCF model by running `print()`. + +::::{.panel-tabset group="language"} + +## R + +```{r} +print(bcf_model) ``` -## Partial Dependence Plots +## Python ```{python} -#| eval: false -# Marginal effect of a single covariate +# Python implementation coming soon ``` -## Variable Importance +:::: + +For a more detailed summary (including the information above), we use the `summary()` +function. + +::::{.panel-tabset group="language"} + +## R + +```{r} +summary(bcf_model) +``` + +## Python ```{python} -#| eval: false -# Split-based variable importance +# Python implementation coming soon +``` + +:::: + +We can use the `plot()` function to produce a traceplot of model terms like the global +error scale $\sigma^2$ or (if $\sigma^2$ is not sampled) the first observation of +cached train set predictions. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(bcf_model) ``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +For finer-grained control over which parameters to plot, we can also use the +`extractParameter()` function to pull the posterior distribution of any valid model +term (e.g., global error scale $\sigma^2$, prognostic forest leaf scale +$\sigma^2_{\mu}$, CATE forest leaf scale $\sigma^2_{\tau}$, adaptive coding +parameters $b_0$ and $b_1$ for binary treatment, in-sample mean function predictions +`y_hat_train`, in-sample CATE function predictions `tau_hat_train`) and then plot any +subset or transformation of these values. + +::::{.panel-tabset group="language"} + +## R + +```{r} +adaptive_coding_samples <- extractParameter(bcf_model, "adaptive_coding") +plot( + adaptive_coding_samples[1, ], + type = "l", + main = "Adaptive Coding Parameter Traceplot", + xlab = "Index", + ylab = "Parameter Values", + ylim = range(adaptive_coding_samples), + col = "blue" +) +lines(adaptive_coding_samples[2, ], col = "orange") +legend( + "topright", + legend = c("Control", "Treated"), + lty = 1, + col = c("blue", "orange") +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: diff --git a/vignettes/tree-inspection.qmd b/vignettes/tree-inspection.qmd index cfa84450c..369cd778c 100644 --- a/vignettes/tree-inspection.qmd +++ b/vignettes/tree-inspection.qmd @@ -1,114 +1,273 @@ --- title: "Tree Inspection" +bibliography: vignettes.bib --- -Examining the structure and parameters of individual trees within a fitted -`stochtree` ensemble — useful for interpretability and debugging. +While out of sample evaluation and MCMC diagnostics on parametric BART components +(i.e. $\sigma^2$, the global error variance) are helpful, it's important to be able +to inspect the trees in a BART / BCF model. 
This vignette walks through some of the +features `stochtree` provides to query and understand the forests and trees in a model. -::: {.callout-note} -This vignette is under construction. Content will be ported from: +# Setup -- **Python**: `stochtree_repo/demo/notebooks/tree_inspection.ipynb` -- **R**: pkgdown article `TreeInspection` -::: +::::{.panel-tabset group="language"} + +## R + +```{r} +library(stochtree) +``` + +## Python -## Setup +```{python} +# Python implementation coming soon +``` + +:::: + +# Demo 1: Supervised Learning + +Generate sample data where feature 10 is the only "important" feature. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -library(stochtree) +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- ( + ((0 <= X[,10]) & (0.25 > X[,10])) * (-7.5) + + ((0.25 <= X[,10]) & (0.5 > X[,10])) * (-2.5) + + ((0.5 <= X[,10]) & (0.75 > X[,10])) * (2.5) + + ((0.75 <= X[,10]) & (1 > X[,10])) * (7.5) +) +noise_sd <- 1 +y <- f_XW + rnorm(n, 0, 1)*noise_sd + +# Split data into test and train sets +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Sampling and Analysis + +Run BART. + +::::{.panel-tabset group="language"} + +## R + +```{r} +num_gfr <- 10 +num_burnin <- 0 +num_mcmc <- 100 +general_params <- list(keep_gfr = T) +bart_model <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, + num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, + general_params = general_params +) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +Inspect the MCMC samples. 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +plot(bart_model$sigma2_global_samples, ylab="sigma^2") +abline(h=noise_sd^2,col="red",lty=2,lwd=2.5) +plot(rowMeans(bart_model$y_hat_test), y_test, + pch=16, cex=0.75, xlab = "pred", ylab = "actual") +abline(0,1,col="red",lty=2,lwd=2.5) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +## Variable Split Counts + +Check the variable split count in the last GFR sample. + +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_model$mean_forests$get_forest_split_counts(10, p_x) +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +bart_model$mean_forests$get_aggregate_split_counts(p_x) ``` ## Python ```{python} -#| eval: false -import stochtree -import numpy as np +# Python implementation coming soon ``` :::: -## Fitting a Model +The split counts appear relatively uniform across features, so let's dig deeper and +look at individual trees, starting with the first tree in the last "grow-from-root" +sample. ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Fit a model to inspect +splits = bart_model$mean_forests$get_granular_split_counts(p_x) +splits[10,1,] ``` ## Python ```{python} -#| eval: false -# Fit a model to inspect +# Python implementation coming soon ``` :::: -## Accessing the Forest Container +This tree has a single split on the only "important" feature. Now, let's look at +the second tree. 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Extract forest container from fitted model +splits[10,2,] ``` ## Python ```{python} -#| eval: false -# Extract forest container from fitted model +# Python implementation coming soon ``` :::: -## Inspecting Tree Structure +::::{.panel-tabset group="language"} + +## R + +```{r} +splits[10,20,] +``` + +## Python + +```{python} +# Python implementation coming soon +``` + +:::: ::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Navigate individual trees: split variables, thresholds, leaf values +splits[10,30,] ``` ## Python ```{python} -#| eval: false -# Navigate individual trees: split variables, thresholds, leaf values +# Python implementation coming soon ``` :::: -## Summarizing Split Frequencies +We see that "later" trees are splitting on other features, but we also note that these +trees are fitting an outcome that is already residualized by many "relevant splits" +made by trees 1 and 2. + +## Tree Structure + +Now, let's inspect the first tree for the last GFR sample in more depth, following +[this scikit-learn vignette](https://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html). 
::::{.panel-tabset group="language"} ## R ```{r} -#| eval: false -# Variable importance via split counts +forest_num <- 9 +tree_num <- 0 +nodes <- sort(bart_model$mean_forests$nodes(forest_num, tree_num)) +for (nid in nodes) { + if (bart_model$mean_forests$is_leaf_node(forest_num, tree_num, nid)) { + node_depth <- bart_model$mean_forests$node_depth(forest_num, tree_num, nid) + space_text <- rep("\t", node_depth) + leaf_values <- bart_model$mean_forests$node_leaf_values(forest_num, tree_num, nid) + cat(space_text, "node=", nid, " is a leaf node with value=", + format(leaf_values, digits = 3), "\n", sep = "") + } else { + node_depth <- bart_model$mean_forests$node_depth(forest_num, tree_num, nid) + space_text <- rep("\t", node_depth) + left <- bart_model$mean_forests$left_child_node(forest_num, tree_num, nid) + feature <- bart_model$mean_forests$node_split_index(forest_num, tree_num, nid) + threshold <- bart_model$mean_forests$node_split_threshold(forest_num, tree_num, nid) + right <- bart_model$mean_forests$right_child_node(forest_num, tree_num, nid) + cat(space_text, "node=", nid, " is a split node, which tells us to go to node ", + left, " if X[:, ", feature, "] <= ", format(threshold, digits = 3), + " else to node ", right, "\n", sep = "") + } +} ``` ## Python ```{python} -#| eval: false -# Variable importance via split counts +# Python implementation coming soon ``` :::: From 32dac5aa6a2466b13e505dab39dd64601077b4ca Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Fri, 20 Mar 2026 17:27:16 -0500 Subject: [PATCH 3/8] Updated vignettes --- .gitignore | 1 + README.md | 115 +- vignettes/.gitignore | 7 +- vignettes/Python/IV/IV_CDAG.png | Bin 135771 -> 0 bytes vignettes/Python/IV/iv.ipynb | 1130 ------------------- vignettes/Python/RDD/RDD_DAG.png | Bin 38039 -> 0 bytes vignettes/Python/RDD/rdd.ipynb | 475 -------- vignettes/Python/RDD/trees1.png | Bin 40369 -> 0 bytes vignettes/Python/RDD/trees2.png | Bin 24832 -> 0 bytes vignettes/Python/RDD/trees3.png | Bin 25988 
-> 0 bytes vignettes/R/IV/IV_CDAG.png | Bin 135771 -> 0 bytes vignettes/R/IV/iv.Rmd | 774 ------------- vignettes/R/IV/iv.bib | 79 -- vignettes/R/IV/iv.html | 1795 ------------------------------ vignettes/R/RDD/RDD_DAG.png | Bin 38039 -> 0 bytes vignettes/R/RDD/rdd.bib | 10 - vignettes/R/RDD/rdd.html | 1032 ----------------- vignettes/R/RDD/rdd_vignette.Rmd | 354 ------ vignettes/R/RDD/trees1.png | Bin 40369 -> 0 bytes vignettes/R/RDD/trees2.png | Bin 24832 -> 0 bytes vignettes/R/RDD/trees3.png | Bin 25988 -> 0 bytes vignettes/_quarto.yml | 2 +- vignettes/bart.qmd | 17 +- vignettes/bcf.qmd | 249 ++++- vignettes/custom-sampling.qmd | 168 ++- vignettes/ensemble-kernel.qmd | 329 +++++- vignettes/heteroskedastic.qmd | 279 ++++- vignettes/iv.qmd | 136 +-- vignettes/multi-chain.qmd | 237 +++- vignettes/multivariate-bcf.qmd | 367 +++++- vignettes/ordinal-outcome.qmd | 155 ++- vignettes/prior-calibration.qmd | 186 +++- vignettes/rdd.qmd | 320 ++++-- vignettes/serialization.qmd | 440 ++++++-- vignettes/sklearn.qmd | 214 +++- vignettes/summary-plotting.qmd | 144 ++- vignettes/tree-inspection.qmd | 174 ++- vignettes/vignettes.bib | 102 ++ 38 files changed, 3055 insertions(+), 6236 deletions(-) delete mode 100644 vignettes/Python/IV/IV_CDAG.png delete mode 100644 vignettes/Python/IV/iv.ipynb delete mode 100644 vignettes/Python/RDD/RDD_DAG.png delete mode 100644 vignettes/Python/RDD/rdd.ipynb delete mode 100644 vignettes/Python/RDD/trees1.png delete mode 100644 vignettes/Python/RDD/trees2.png delete mode 100644 vignettes/Python/RDD/trees3.png delete mode 100644 vignettes/R/IV/IV_CDAG.png delete mode 100644 vignettes/R/IV/iv.Rmd delete mode 100644 vignettes/R/IV/iv.bib delete mode 100644 vignettes/R/IV/iv.html delete mode 100644 vignettes/R/RDD/RDD_DAG.png delete mode 100644 vignettes/R/RDD/rdd.bib delete mode 100644 vignettes/R/RDD/rdd.html delete mode 100644 vignettes/R/RDD/rdd_vignette.Rmd delete mode 100644 vignettes/R/RDD/trees1.png delete mode 100644 
vignettes/R/RDD/trees2.png delete mode 100644 vignettes/R/RDD/trees3.png diff --git a/.gitignore b/.gitignore index 0c5a93ab1..a64416ef9 100644 --- a/.gitignore +++ b/.gitignore @@ -39,4 +39,5 @@ yarn-error.log* /python_venv /cpp_venv /venv +.venv .Rproj.user diff --git a/README.md b/README.md index 90c114d00..e6208b9f0 100644 --- a/README.md +++ b/README.md @@ -4,72 +4,61 @@ ### MacOS -#### Cloning the repo +#### Software dependencies -First, you will need the stochtree repo on your local machine. -Navigate to the `documentation` repo in your terminal (*replace `~/path/to/documentation` with the path to the documentation repo on your local system*). +You'll need to have the following software installed -```{bash} -cd ~/path/to/documentation -``` - -Now, recursively clone the main `stochtree` repo into a `stochtree_repo` subfolder of the `documentation` repo - -```{bash} -git clone --recursive git@github.com:StochasticTree/stochtree.git stochtree_repo -``` +- Python: can be installed via [homebrew](https://formulae.brew.sh/formula/python@3.14), [conda](https://www.anaconda.com/download), and [directly from the python site](https://www.python.org/downloads/) +- R: can be installed via [CRAN](https://cran.r-project.org/) or [homebrew](https://formulae.brew.sh/formula/r) +- Quarto: can be installed [directly from the Quarto site](https://quarto.org/docs/get-started/) or [homebrew](https://formulae.brew.sh/cask/quarto) +- Doxygen: can be installed [directly from the Doxygen site](https://www.doxygen.nl/) or [homebrew](https://formulae.brew.sh/formula/doxygen) -#### Setting up build dependencies +#### Setting up R and Python build dependencies -The docs are largely built using [`mkdocs`](https://www.mkdocs.org), [`pkgdown`](https://pkgdown.r-lib.org) and [`doxygen`](https://www.doxygen.nl/index.html), -with everything tied together using the ["Material for MkDocs"](https://squidfunk.github.io/mkdocs-material/) theme. 
- -We first create a virtual environment and install the dependencies for `stochtree` as well as the doc site (several python packages: `mkdocs-material`, `mkdocstrings-python`, and `mkdocs-jupyter`). +Building multi-lingual (R and Python) vignettes requires installing the vignettes' package dependencies. In Python, this is done via a virtual environment (local `.venv`) ```{bash} -python -m venv venv -source venv/bin/activate +python -m venv .venv +source .venv/bin/activate pip install --upgrade pip pip install -r requirements.txt +pip install git+https://github.com/StochasticTree/stochtree.git ``` -##### stochtree - -Now, we build the `stochtree` python library locally in the virtual environment activated above +And in R, this is typically done as a global system install, though you might also consider [`renv`](https://rstudio.github.io/renv/) for managing project-specific R dependencies ```{bash} -cd stochtree_repo -pip install . -cd .. +Rscript -e 'install.packages(c("remotes", "devtools", "roxygen2", "ggplot2", "latex2exp", "decor", "pkgdown", "cpp11", "BH", "doParallel", "foreach", "knitr", "Matrix", "MASS", "mvtnorm", "rmarkdown", "testthat", "tgp", "here", "reticulate"), repos="https://cloud.r-project.org/")' +Rscript -e 'remotes::install_github("StochasticTree/stochtree", ref = "r-dev")' ``` -##### pkgdown +#### Building the vignettes with quarto -To use `pkgdown`, you need to install [R](https://cran.r-project.org). -One R is installed, make sure the dependendencies of the pkgdown build are installed +The vignettes live in the `vignettes/` directory and are configured as a standalone Quarto website via `vignettes/_quarto.yml`. Each `.qmd` file uses `{.panel-tabset group="language"}` tabsets to present R and Python code side-by-side. Python cells are executed via `reticulate`; set the `RETICULATE_PYTHON` environment variable to point at your `.venv` interpreter if it isn't picked up automatically. 
-```{bash} -Rscript -e 'install.packages(c("remotes", "devtools", "roxygen2", "ggplot2", "latex2exp", "decor", "pkgdown", "cpp11", "BH", "doParallel", "foreach", "knitr", "Matrix", "MASS", "mvtnorm", "rmarkdown", "testthat", "tgp"), repos="https://cloud.r-project.org/")' -``` +To render all vignettes at once: -### Building the R docs +```bash +cd vignettes +quarto render +``` -To build the R docs, first run a script that lays out the package as needed +To render a single vignette: -```{bash} -cd stochtree_repo -Rscript cran-bootstrap.R 1 1 1 -cd .. +```bash +cd vignettes +quarto render bart.qmd ``` -Then run the `pkgdown` build workflow to put the R docs in the correct folder +To preview the vignette site locally with live reload: -```{bash} -mkdir -p docs/R_docs/pkgdown -Rscript -e 'pkgdown::build_site_github_pages("stochtree_repo/stochtree_cran", dest_dir = "../../docs/R_docs/pkgdown", install = TRUE)' -rm -rf stochtree_repo/stochtree_cran +```bash +cd vignettes +quarto preview ``` +The rendered site is written to `vignettes/_site/`. Individual vignettes use `freeze: auto` in their frontmatter, so re-renders only re-execute cells whose source has changed. To force a full re-execution, delete `vignettes/_freeze/` before rendering. + ### Building the doxygen site for the C++ API First, ensure that you have [doxygen](https://www.doxygen.nl/index.html) installed. @@ -87,46 +76,8 @@ doxygen Doxyfile cd .. 
``` -### Copying Jupyter notebook demos to the docs directory - -```{bash} -cp stochtree_repo/demo/notebooks/supervised_learning.ipynb docs/python_docs/demo/supervised_learning.ipynb -cp stochtree_repo/demo/notebooks/causal_inference.ipynb docs/python_docs/demo/causal_inference.ipynb -cp stochtree_repo/demo/notebooks/heteroskedastic_supervised_learning.ipynb docs/python_docs/demo/heteroskedastic_supervised_learning.ipynb -cp stochtree_repo/demo/notebooks/ordinal_outcome.ipynb docs/python_docs/demo/ordinal_outcome.ipynb -cp stochtree_repo/demo/notebooks/multivariate_treatment_causal_inference.ipynb docs/python_docs/demo/multivariate_treatment_causal_inference.ipynb -cp stochtree_repo/demo/notebooks/serialization.ipynb docs/python_docs/demo/serialization.ipynb -cp stochtree_repo/demo/notebooks/tree_inspection.ipynb docs/python_docs/demo/tree_inspection.ipynb -cp stochtree_repo/demo/notebooks/summary.ipynb docs/python_docs/demo/summary.ipynb -cp stochtree_repo/demo/notebooks/prototype_interface.ipynb docs/python_docs/demo/prototype_interface.ipynb -cp stochtree_repo/demo/notebooks/sklearn_wrappers.ipynb docs/python_docs/demo/sklearn_wrappers.ipynb -cp stochtree_repo/demo/notebooks/multi_chain.ipynb docs/python_docs/demo/multi_chain.ipynb -``` - -### Copy static vignettes over to docs directory - -```{bash} -cp vignettes/Python/RDD/rdd.html docs/vignettes/Python/rdd.html -cp vignettes/Python/RDD/RDD_DAG.png docs/vignettes/Python/RDD_DAG.png -cp vignettes/Python/RDD/trees1.png docs/vignettes/Python/trees1.png -cp vignettes/Python/RDD/trees2.png docs/vignettes/Python/trees2.png -cp vignettes/Python/RDD/trees3.png docs/vignettes/Python/trees3.png -cp vignettes/R/RDD/rdd.html docs/vignettes/R/rdd.html -cp vignettes/Python/IV/iv.html docs/vignettes/Python/iv.html -cp vignettes/Python/IV/IV_CDAG.png docs/vignettes/Python/IV_CDAG.png -cp vignettes/R/IV/iv.html docs/vignettes/R/iv.html -``` - ### Building the overall website -To build and preview the site locally, run - 
-```{bash} -mkdocs serve -``` - -To build the files underlying the static site, run +The overall site is built and deployed via the GitHub Actions workflow in `.github/workflows/docs.yml`, which renders the Quarto vignettes, builds Doxygen (C++ API) and pkgdown (R API) docs, and publishes the result to the `gh-pages` branch. -```{bash} -mkdocs build -``` +To build and preview the vignette site locally, use `quarto preview` as described above. Full-site local builds (including embedded Doxygen / pkgdown output) are best done through the CI workflow. diff --git a/vignettes/.gitignore b/vignettes/.gitignore index 6041614a6..0ff578e35 100644 --- a/vignettes/.gitignore +++ b/vignettes/.gitignore @@ -1,4 +1,9 @@ /.quarto/ **/*.quarto_ipynb _freeze/ -_site/ \ No newline at end of file +_site/ +*.Renviron +*.json +*_files/ +*.rds +*_libs/ diff --git a/vignettes/Python/IV/IV_CDAG.png b/vignettes/Python/IV/IV_CDAG.png deleted file mode 100644 index 7900ff501a89a14d1895ce6bc6a217ea24d6118d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 135771 zcmeFZ^;=cjyElx8n-W+w!lJuDi3QS)bc29MBNB`5u0^MUQUXd#s)R_VG)PLfpmgUu z)_&e+d(Ls6_YZil?=Rv7%3O2Id))V@#*9=~mB+y%$3j6t!BJF@(L_PH14Ti({T|~l z@SSCbqIB@(mW!sm6iV3u^alzG4T_?Sq_*dipPA^j#A8#R^voOsW9L$E>4I^FWqxX+ z`Ls}du9Q{zHmSrhTv*v|^E2|!t&Z?7dhMtrcfYXG$>M)|8hsN`UVAJE4RXQlE4p|x z!9TJ1v-+g3)aSiuoA;X6{0XV}_33ha3)3x31qoDwgg`U~n*aV+8g$eqb$DM;?&p8` z;s5zBbhqxvGY9|iKmXs~mq2mrPS=cjJ^lZ71&Nknq5pBw-ydZ~PNIcZFlAfeUq2l9 zcj|Cg-2dgHOQ6=_gO85$?qSlO_w|1+K?Kdb{cm@T{HcTk8WGe*`bXUVT#|o2-5vS$ z|Lqqu{O7j+S*HK~;Q#Zq|EQV&6#2jRmjCSSKU>d#TH2p$g6!b`>E!>t&HiVA{bz3f z4@mYOxbg>x{0~a|4@EAam}a4IXk5JBya@Vh$x?%3ur??<{em`xq9e;zM>O-90@ z54paWzajDTJ?Iw<;{5Kv7i|*y^SIaK`u=s}<$f>vb^qY%OGB>G+NG$ylNWkVgsvM$ z9OdySYB! 
zD$m0&-;(#SnTCh zj|!W%>-SDNRbI;^R$4}r>{&;Xa4xOe5fr`$WB>V9_cZsPvk#v@LffM41wsp-HZi-W zqkHSpXS~$F^g7ifyk^e#0ao12*vpb5jWnkV|dm|O5`%$+6Z#H!SuEL#)sN5fBlI&;##SG7s^`v^6Rcb_0-Cc(l*r}8!4b`FH?vo0eN`1I zrXz@OzEJ7MSRdu|S3dGj7(FQcG;N#XvOch9U7(uTI=xj&DDZPe=-U3*$BDS5JQY-a zv0x_bv09RKoRP=5g)bxBfV*ULZH zlfs)mG&m34{r%)tE+W5Id|u9E53Q7I&(_l0NKJh4qZ>Jj7=s{tTskRh{6?WLJf*0p zP|1P`Qbw3F$Mt{^tuvj}zGLeLe44Y&nS`F$SJ#biUF_x?{q#L%-K68GM03A8^>Wt4 zG|5XQ*>?~xH$Bt+%yn?M7yCj%qNS3+)Q9Dd;tNFkszwv(P6YQ_iezqWk0fkMpq0iF zy641~|8*o9`;}apw1`Y!UZK*}`BB?uNjcTyPj>e<$KO*3%({-UY`*i2{=v~pzwz!s z=9Hn-XI!nrCSUax!JuPnDCTh3*}%gcnMI-&R}Id{+*(Q}G3PRjnAv(aY~y;@$o2Xj zru6n}bO&_YS|$CMBIp{SO!ms*o*>Tq&2hd z-&xqU>$l1@p0} z5IvazeTT@nNYxDLmm%zCO9aKN%o1|3A`|oVbQO)G5*5ZOVMSkAYHjzJepWE_*<(lF zB3%9Xc?6_rx3*7fn397YXla&c8(oZl3TE9A%`x{LG`kegOo4`v1J{=gBKqzQjSe?RFq@2cg>{8$k!-3YggY)1M@j*P_$f3ZV= z0B92zziMtPKk-})lZyLlN4>#(M)`x@utfKLD{TanK)Nkmz@A)IKOL<&7<3;U^^>%J z(b0%77LMsdJxAUunl8OnTx`jaZu2{tC6yIuO|Ir#)ngEO>a*^I^o%go*g!kMDxt z-fJ4wV9WUjoI@l?r~W~7YAh{hYU|J1DJ|YWoW(7ozl} z>vdl)Q(X4?r8p{9+yL6YY{ueH2?9bBpI;w$zk;W9GUvk`0~NYoe=c_MvpPFWUF+~2 zvgNpKjhljcimoGf;@=m>Z8G7iF)FO?B5k2v_`aH?2IOVsm~q#_tO3c@4SyxS}MI`Bdq`2`GsR>kfF7K zObXc#-yNGomD6}=buFnLy4$6R2V6^yy}~5SpH#Xy{?(=-Pfa=g*rlIEc*$;sIj5bq zy3Ws6;5@_BW&FFI!Gfl^5i-KmficXtPqjNZi9NS4s%dw zJwqMLJr)6yknY;I&!nC1p>@(qhlheu5%z2}zXL}jiH?uauq%$Tts~Wl;5DBERJsha zyv#w?_44&ME>IL7pNss8{U2NiQ{l~H0tp-F11sgz!%?+1&YBql=>jJKO|tyA3dbcT zS8R4)j>dERB5-s3KD%pJ*iBVg-pzjEP`6radBb+_7B15IK$y~c;O;WBGRAUOEH-lT zfq|iybM7Y0T>H`fy8y|=kJ-;2Hs5NUCjCGEGUMMdBXwkVnTh(Lj}_O!H+`)0#~4+= zg9hnt1Xzv7_jFhP=ZaduxfT(Hl0du6j0chMkI%haIa*zDw>+}!d(c?u9=LOu{xEL^ z2!U{|?-gO&sFH5A1DNWQ&&$7t z*?s<_Ap82&D86as^NBKE<*UJfYZsfLH$kdhmJ7e+15|;dU_kpXLvI@mBPR>yWR@oC z}fkI53fa{aOCToI+gS+#3w;DcC){LB5-5z<5~JQx(CV2Yup zF8jP)uRp}_9X^FWZrJ31RWaCj5iaUJFM8s_a!l#J-^08J@G)MVd^|348E-p`CYei! 
z$$tW%n~l$51{Re-vc`J+s`}@Z9^A-1d|UF5GNK8}##pd}f=ykT<$H5=(v=Ylmd&=0 zJMHn-`1`yegz}c&nav$dtZ)zO%0PdMa4%TUmkF`g`*R8rtNaZE<3-MG((ZdX*Y0&vm~VI5 zwM|Vn1>6jMvi}ZWNt6%Qeg9bJkd^rAy6Jqggvau0%I-jS;-VU)T8;`c39r4{msVP<$f-kWH_2@5#Tkg$hUz;ALG) z+=R@kF}dbFf3rfU$Ba{sR)H$7a_Xb*PsNP8^EcOmzQWXvikq?5)jNwU_CCTJ2pn<5)0kKhhk3j3uHKJONgzdnM;B4Y$KWd+%<2nqlD7p zG`e6&Fpp$U9Ib@4h}mDd{4kBO{cH zRaYLfl`)knt5(kj0a z5z6pC{}E&qSA;JYo4l7Gjf+Y7(EaYiody@1$%-7bl6$wDg$G{qvxjSC`)t=~ctZJY zwP|mdmUhPt9Vw+t;zf@qS*XeZ4KzJlO)a+)-E%EQx~iK3bXkA)(>LUV5_avg3fppM z9V-dFSZd076+*lOo-5<$u)#U!@HR@k5IrJ-N!qo^>{o3*6WfB3MX^diy zEkwXRF2_BT)0;_@r@GhXWIvXUk~6m46(4WzJ#Jtv%lit4_kQfw9&-jn_-aL$v4-B< z-qZJpYNA-=;hnDHw>y=8HW&UFl-zGaInw52+dz!CCtdOkq1K-+h`05==1J|3{B)^2 zx!6{=l2U^1+cqWFXz=$0B;sH!@VKn?!IxtNJ2T>nG*A${S3vWi5}2|`-gsAPYw*ubNO=$(*(Vv3nb&W zpZ>sO)}FpNX&#Sh0)C@Zhb+b&gCe&7n+2$*gu$PdS||sRVMlWiud&@g7RO?aywjsw&d&J1Yuzonk!6q&}pyq&jvacg)$an;(fNd zyIrWUm>bQ7r47rcmHKEq#EQXU_y;I&9wB^l{|IaVLRHbDZ)p>b zGqp}(nHmghA4XN92Re;CX8B72|BQ-Nim!j{o3wkTCU(XU-cJn>c4aWXOQ2jOS+O>? 
z|LNjlnt2@N^RhZCns3ugK^$NLum9Twj%JjI>*Q`aXKN5LlOf73R2OxeYdH0vi3M26 zz4mZKDQ}?`NL`&$yq{jH1%6#dj)xbWCZNrbS!K$|x31!e9uDchYfwyP3W9s5BTMGr zasw%mC^|*RkHzDCxB+j&5hMc5WHDbQJsin!W}JIDHC(0aWI{iT`JxsnK#03QKe^gn z2*C>%a#jUPjy(?^nH*Yw@0qqC<;m+-fI(1~dx!(Yi-rPD}0Z@Afre zmF8XH7duS>IYZMb0#gj7HFMsJub-Di>O|Xrj7A|Y-U(O;;&JawOB;3C{8ki(n^aQ` z8o6oHt=p}0KEH^=tUA`F`LBCNC;-3AS-1Xk^LD{KlM|Okc1Edn%fxH-1rid$!Tq+QN+||*j!jls_QJnu$aj1lTXW`E zicI=ZADnmaZ%qXCmh<7FjSpycxfRIK);a$Sp{UK2|CnD0>Lj%3z!BFYtsp8SiX>IL zYw3=EK`W}Q`lB+LQfln{jzPUP8JXVn9 zZ3_bm(cc2txW0Mp5{el&@&B@~RKXq*!?>Y(z|`>2R=f6eDZ1@sK7d+06!8JjrHEL8 zX_gGnrN~M;UevemJuw#i{Z@wM&AGa)v%Hu@z(v9a9teWZL;PvdEV_0oDm@UlYl$kr zhx-qbppQVQJBL?jaQww={&j4k!nBR&X>;HZ=&108Aars8!_b4j_PkDa7fm-mJo{VF zXb!rKO$5mm1k&A5IRLpeYT0ye%~Pw9Jp%7v#tIR}jN8eq>FygzUZZ0`Be)voinI!L z*Tqe!Uh_SJp4QY+L_3Z^9^Z5{cr8{7x#W`h{dkwkMdYA{xVmSZfv+n3<89i`DEwk6 zoxZ3GP|1hK$439mF{6%MyyG0?8n3 z&SaT(`tO?;$5)upVAGm+>Y`j+ju($Jv_vX~Z_~jKR$IyY@=k z>tS|siW%yj2O<V>aUdZ5WLmK7LbKLkyv z&62xew>7QT>D>$6Bx8KSH=cdB@$TXP&Suzv9sr=~9dclcc*2$t^in{kSh8YQ1>3hn zX#4o_@0G+r?cq=Krz5XAm4&Z-yn{{B9uN=ZXY0E>9OXd{YKNIx7OwphButitC~h}h zY=53yUvX`J-`y$o8izMA`}zBix1!xKunkI#`)}uSyKM#vA*M%BeS{tX!3Ir zr)i-|ZC$qQANCm-X@WW+G@;)6D?N*!zB^#woKG}ib(q!u1^Vm*SI{6b+^3)Iom;az zplb65>=zZZ4xv+FI5wVHc><1yEddaBcE!yt^iX&E@8N)Hkq8#jZGQ1B1I%q^BA+uZ;&W)UeGAct8y+{>Gt6PiM!VdamcFg6qNic)LQ!0X8u{KOURi;EOt00Ie}%nA)Fg`^$P3kLT_J z>Wf&xIoRRGFg?I74}Ufs42-a8g2y8(yzBm3L(*nOHDIQvqeG=zT8y~4_E1*7SxHt4 zuX9-s2Xjg=ToEw72_64)L@XlVi}w*0=i?6;#-H7=w@SabKQ+yoNn*V*C%E75p}X3) zhJ+_dG|eB6k;I$3>C)NtJ)^uQ5I9?@Z%(^B-+#9)d7oSTU3UhpRxbdIFy7c66~S2| z?C!VY#X2Qq!A7t|Z{mk`GYbm}BBo?V0e0*l*3^Jsz#^dmF z^7LW!J!tewgj67djPR->$4*@VWRn4PNtZ$7VPxhr(=WA4_Zj0Ow(twgBXB8mt1E%j zrD4OsKta0l7Z6On-L^W1q@NUYw+e5SYR!!vUm+KrEw;J&VjJ6I-c9cmG)!O@5Mp)B z0vytY6p6HyX_#I$eHsaWJ57b~VZ=k9x&|c{&R$lVOQT+XHQFX_%d8|&V=%*?$L?LtlmT_R_wSpt?LHPTzj3~A_WGJq7kjH+cA#@C}aGskjT zBU7|dKBLi+eLfJ%U{F45T6rWo`c~%BkWjf zI$7beULlt$oHIcFyxm37i=o6;S)>e&L45|zbGMEE7jPr~#{3QAPAW36)-Nh#`~i8w 
z{1Y1KXjN!sP1on})fajsxdf*aE)hJJ$!cOka~`iA&TU0KpJRcMOn81b{0ABdkFD1kj+1F2WZur5?GUPplZVrl(VoMrem%uv(L1Y^7K|nt->A&G%nO}k$gq8cSKYFDAmM|*dNe$ zePW6N`e-LK1vblNw59o;GXzCa<1n+)>u7BB0u!KzU;?KpHfMaL&bDsOJLg$4lW9Ny z#9jOEpZEo*tL|oHzW^;NsyT&KE4TU*c;oSgwnlmGvu+cC<`;!|arZO4i#Eou@o*@P z^*vV-Bh0;1Jm$O#h8BTHF9P*iN!-g)S)_Fd7{9Jd_>~iE?i5?|1^mYU7h}FqMPXi0 z;%x5VFq%&lcWnN z_9L?L-B=g>dt*R^VDuCyjh`ej_dPJf&(4`@34y6x$~_Eeq9q$*X^0J0NhU7 zm*O!HJ7_{J<;~SjK*XTfjfipwiGFafGv^0 ztVMEnh;PbTL-8)_)-4A0L$oIV^6u{Ua?4k4fEmWQYXc-$*mBGRsos~IZRP$)B^9k$ z>_npMw=OeKhOww)s3^B7U_5v4-G93qp6Nc#Iyd13pZu)HO!YseF zSG&L$hyc@_d-$wds!=)uSHtM)mtz2hbXk$JJ;r0%LH!ee9~b&MGl5WA-jPCDg#XA7 ziQ0Xujj|i4g!D*c0~34&_F-1i`$%-iqFn7fRFMKy_d2sf0!!aL`WuL zp-_90<&%q@`3+K}!LbJTKpdU~$GOHw7Y1ToC9m+hfL(ZWgia`%i{ty^3#Jo6fnuE- zumbFXfI@0gQjr?HL5NiCnV%y;qW*U+Z4#-)Loa8Hz7l}Zp_T>rB@B?Caw5v$| zwHCx01SW8QO@G7&o?ZMvf(Uw-pP~dRHvXHhyGZMXnrN|8HI@$#Xn;|8p+wQe?jvt6 zSn23bh_K2M2SP5G>iF@gO4!hO!bTNngDGBe@?$kECava70g~kK;-ENF>V?1Sovugn z+qZt-!Y`V@H{CBo`J@nvjIy%_zzLMi) zWUaXSE{2h|0Wq1B^r%(}Q@kKpFuG^u>0$=ld%JcyP9tp=2Oq_!pshaT^He`vPO+ zhJJfypOq9&1Z}z!XvB?w;a|@Ur+UjmYfE$W&_{Ju)ZJh{ez%9r*nP?psk3tW#^g$a zju|_A7fjg3U~_|@@uU-0*mzRV2a4bO>po`1u2NjMXH;LkD-FSvaq1*zj3tKE>K2J#&0=%!TW7lI-mT29 z7#QI&Vt0x2Zjf5J0Iq4<#&F)U z^gZZe2I&&zGv-g*Qx7;!dQIepK|W?Y>KV(vFC@e>d_!TcdXJDPv`8zz{kZ^MEaVD8 zYTA0_6*7?7Ru2+2nHQ`LuUuBCdiXQ_;hfB=re4OmKO+W513DuvUo%pxzH^d9S#4~- zTKX`mh8IgNfW$K#8CtS&Cf@q)!?L7dE(1aZaF){qs?fc<&EgU|LBl-=04!ORp9=n2 zD;iSN9{W9zV+iZf%Ktkq5UxR=#b|vOT}sBnDF`+Wl%fr@8S{A2C9_0Xb$g8TCnRr( zCTI(mmAJnUVfXDm#8x7yHi(0_t|A^Bf;okYLvbBXZR&~@-+Ld`Ul)}?K?;B10}?ZZ zZ_?Q;zFs~LZc9|(`U-(M1-V>Bqb|qX`x{9BGv0LI=M=9v%@Tza&9>Y=sc$eY6%EeL>K1NbJNYW@tzW7#x> z9S!@u)A*1i%rr>1YSD&4kp3)P|FUA2MvY!IeuTu3cskOy%7vtcS(^MBEAKwxA-UKR7r2`4ePKOrXz z2YVJ<9q?Y&EFZO0^RU1$k054u-uXiZwrVQWxG<LxW+J34+P zJm(0eTB14mfC;uN;MLs1L%pLgT!*f_croF6IwUnFp@Q-Xpi_B1?R)PzOqTS_{2+UXXV!K7qIhmVMBaBwD?`+hr%cyEXDo=@dv^;Z`c?ofSLU!}TJ5 z>{~vpWFZ^rQU^kM=ScIt3q57^Rl22U{!U3F5OGR&COxhG*}`w8A)6qE+krc)h{fOjCs4h|);5`$=9@KWF`4!f| 
zWa?ay;rz&EE?0K)o#Q-~M_J0;?Rnr>YopzwI)NM3T+&p1 zoW)Lf3S3HPO8a!Ft{zK|SFRsDG3mqzM~uv*;wvc$$cT$ci>a=m=Te(lK9l-6vTI%T z_Ci;eVRaHKjlf49-A!4qR!qLbogZ16*VESy8*e%Rpl3;q(&rw(%tEYJ4`wFM0dGW2F z;K+ACJ~V3YX_!bPhE>m4gu-h!{bdGaFXqv8jB+tTd6Q}@Q(&}4uMd1Glrlto++_$Q zZ;4ZPpxS@i*>+!_ra7N&bmjnN#dcTv)V0TjMF<7z1A+#zLQMDsAcR;YzIN+GzqQWE zV98hQy=*i|C3OsTE}I>!W<_)LI%Xk_xG-72N9Y0f_2s&Fmsjwv0lrQb2-~5bBf;{@ zR|e7zpp6zT=&w0i{qoEUHvdH?@AEtTz!r)U-NKyZdq4cFiiYBWRo|f0C;iY$6yeRn zJk)JD65bwB4Sd`vtRFV~-=8^`R-CpkIs+cm0eN0*U@g6tG`a@_m}u3me!=e?;gZvU zf@~@UqGJK`fHPy^8_>@WfoO>50eP)iCKk$_F@Rw=wIfPLR_D|cskAKF7I<}nhqE~m zPg@<0Jz-3QPi0X-OJMA~*AZ5;@J71gZ@1X|eJ+}Od*zptxrg=VRYDX(_;Io7W0rZb zt>Kvup5$I3JHR$4bA3*v7KzcGd_pQrTwhj_YU7jE6IS~_lCQG{)0=ih*Q-{GcU}Uu z1Q#h8O07!;KtdmS%<+7%85UU!w#c6Hv2!k2au)?6^0JWAyaEpGOBb5Kl^y?+&scSO z2_8uzaeb?H4%Si!)ZQC8EL7Z&PTx1q>kir(0C$56TU7iv5w>N$WrPo7*S}k((A<)^ zkCo50{I<8zB`e@#50;wE7e)QJ1{sl>Uq$uq(j+Lrtg?3|SVLIHOfUsAl-KB%e z*6J092g|>ZIAU(*C!Cl)VU3uJ7w?1yBD{)Zt(SxEn06Lqhu^!G3TuVr-U}womQ0Xb zkgixfVG!6Co-m3^USKlWD`=>FUfSs0w;n~-<@QEoiDo?+O&)1Z7e^P$lJS(MT4!5Q za-IuhkGeNy%ix1dH*p%kXPolHp`thM$?& z>L{fa(qu9l(r}p+>7o8lL@LdGq%HV+5BLYdM82$N2KXZ*nRoNJ&b;P&uY&7ZrP<;n zv51^GnNK*3OncI<_u>O$E(|gbs!PrF_x*?pTJV*`tITs2YCp(4KVAn3@#9mk1pf!_{^GN)M~}h$WOmH^&-nMlu33aR7UJBexDP z`MfPw{tR{wDtwTa+a&U~tCTf*gWM2bX%j-JoGV5kdxgSH3CX}moCq$bH-2iLn`oJ<()|8jO)qxdW zHMN!=5zW=W7&x}uC_`&9$_wkIdZq1d)8FOr5{BFS>&TOLnC!G zwN$P{l1|DH+$oZYr)?puQtkJ&U~P!bnGK-ez%DXeau1TdURuXl&YT`n-x3YGbTLAc zZ^R>sEy~!^a1)9OHg~&+IVk;))UJISwP>0(T76%BF@?q8eR3Gr3-KQGFfCN{uYhA2 z;tVvpXQM_>gJ=Zvnum<1MNqoR*LaGEgk#lA;jpked2Ma zYg#b>`1cWd4*pvtPgymtfJsVBDr%sC?SaOH4Q1ib)+eHKt(;beX%79{}%Gl1JfFnoDz|92=H`Ko1Ic+Q5jwECT8H2o)_01E{+zBWj?GT%R9FAb#e>Ei%>54;-FO5H}q!7@&q-EC$lS`EVvdtS0o9_)XH{d97nx4S>67M zQ=3Vt6|A#nkbL1JdJ~MDyU^|5JJM7YCwRun4frXUYG(h#;PQFfJUYHea^s@u~3ekfYfPjxLPJHHsJs4{w!Y^{|(;a~JlHrwnp6Gd`L2Myk{gXT%oWSsIzxTE z8^How!dF~e!MfwCdE>cabd~L@7)4w+wYaGR$6kI<5!d`^`)j2U02Jfd@ zetal$%w}W88PqUJ*}RUxh$FICrZw3Tu$RircJ>?*Z0{`9Ps@nUz|Me=F0sipH{4ub 
zU|(1$bJy;+vcTv#)?e+U!fmNMTLjaOrCTcRH;s+0gFx+=HHQe{Z?tKNhH=z(D_AKs z3@`dc(m*Ftx8+h*Sc%t)2vg0%e!;foi}MhU^@G78-q)a3A0koZK#(D-E%{|-M5#^A zxW3)U!Ye{%oX0M!42|wqnHxgu$3^Q=oVpR!r%MnY;zHbvM+UrWI#&3+dbwXR_q?NB4yR_4%CeBGTy66I;)HT^&&NG?d| zJLECQLrjp(8CUVfd)lBSYrlH!rM*u}uq1GRt2#sdGt&EuV~a8XAwiy#Xztcu;>3xPEKhSX``NpK`NDLUwas`RYoZiQQ+c{#uE?V}F&Y+{ z-!jw5mvVI$psUbe6vpH+8OWxl{y+mGAn*4;>|mOa<`g0X`PwPV!eG5{1Y=9$(76BA zMZ}8!icdsE%8cvTQH9wt8r~-mC_J+croXa4dJ}AYi)p`fzJ@8nxXP3|192*y_TMVr z5JCgk|J=!vlw4=~~~Mw76HQcjE@ zP)86LNfXp;scbm3oxcE@#(diJ?>C{STcP$$!Ax62Wusy_npK5XQzSeoaqoo6k<94e zpX;ru68BITOlrPa8U@);r1?+kNrc%gtYrXCX8)PtG7#A+sX`zXWkL4K3K#8>F2X7` zv?mFVo4+-SREL(FHp{30Bg)s~l(Hpq%V=xdzBP*dJ()-4TdZMNRP{vrYlffnjkLRo zKJg-PMTy_JDKw2=8b$RArw|#PJw4pm{RaG0d1}885KR>uF7xEp60NWeBIiv1mm*@a zp4XuDhApum+W5|6ZsX^?@^`t(;|<4f(sr_1K3KKyll9a@$q%@>#x{;e9*?-w)e?f< zWt7D?ka@BUXZX7CapDR44sGwH{~C-{ps-#ALo{dEs~%u~PlEfn<}R?8UN*MBD8PdK z8k*+)XJc1bpmMnChbFT}L)wyJ?+6aau_2NyvUp1083YR95-SA`)nRkjv#hy7dCg>8 zQ7mQUNro}vR$OPQQ#xfi=7F{&BE?rnmCYG+g-YM%m7V(U!P83|N33s zJZ%+zRqR4q|6+IKUUMA8YM(w;9xlV8Z1^dpXE@94mSmQM z`{kJiHYEdPDQUx2c-x3&#Dt-IQ@f{@oT2KX-kSzQ6hYdwuSHyeX%MH%v;XUbE-kq# zg4IBh(U7(tc`~ox`zUu=P$yi~4pfB6<%+Fxxt>he#wJfzE&3FMq9`kaToqPEA(GJg z@e|1X=ym^~8@K`vQTzlabQ&YBLH9L6_hs^821so!k!gOJ{q&t=f0Do!ovRozQ{eR{5{)Aq5#>dHR-lEYX855Qp@F_aIO*39X- z4Q*=;Hz&%ek<$Lq7oU@~>JmDVbRDeb5IrB%qGI^5jFK+p07;DmTqcyRY4&rToNP;? zNHoBaEcAnB=c17mMr{y-Dp_&3dq;*F=VNe4jJ#t=Q>q~P%BKR>g6~0%Y5>k3w(95^ z6U!DiP?%q>>Y0S~0y#wgFf3t{;o36(#rwvF+KK7|mM_+d0J9p)!P^s4rHb*P0Vkj5 zJBa8IVT{*M)nX;}4Z=u|_y*N!d2gXT)!iWy8c!A`!kSbww?5spJHW6BgZv|n@hFlU(k>IR*y zm%GNuwc$nSq2VhLfYp*DI&L#zmA;iz5GoUfI>8_ZBvAbJLe%TM!ymu_;bPTB8(h0; z3ybJY8gk#3jS)0|nQ@$94vcivJHh!XndBYoO~4Yo%QZsE^a!SLo~Z2rf3I}`Ouom%I31}4(oop^(ijSTWk6PXfK~Dj~At7#3I;WL`^r>5uSx!2kWx( zTvA?GFPP$~vpI#h@l|WZXjABA81bD3zZ_%r2M~mN;-QX>V96?dzj=LGM|A!uCmt}U zrXk{XP8dCmj%d}`jl!DIi(HAsVG%8fJYbtaOQvrZtN}D#Oa;uQ-Ca~Zc6XOmyBH7CBJQJ8x~gB! 
zjC_y{(;j7BV_f$EJeOPh-D`9B?3z)X1AS=z4xt%s(S4Tb&P%SHBjGw){Jk@~X?a;u z84gz1eMg9%)3}LCKR;HS(t{k^U2D)Bb{D)he=lqUS|L=ikaJP5i#ov9mAFzrk5;Ub zhVJJl5YBN*OvsQ~6^+{|7e+bSw{5Mwm*&acBizIE zpu??UcuDhS_l#KY@sEJuVKpz(>VeMJ1D~n}va!ZMF5^ivXT)TOKFzLu)77yCO|K!G zL56ovmW;w7vU!#IbscTn07wdA}2~ z$7&98WgdLzT_OxSzZx&LDcD~ZymamPta-xihliO7o26PW zmubkB$wiS!t@KpgtE=Zk4;SQ*zApaM_$RR|@q+NF#^6%O9jZEtGf+weavOZn-Xs9E zeR{1#!8#(;Y=ef@KkjI->)w-|d!ewP1)W5%!Q$`Az4e4bH7jIF zUi%|wOxE;xJo&1bx*MI~v;x5sAepr87+J%4@~;@sDNxnd2Qsnk@QE-_3M$Z+lnG=! zGf&<{4Ufg@*~7AdnR5T~hgv|C>ci0DmGJMN3ARZ%?~D~`Ie5$_)zVZrH3-i8K8SMp z(L$O%1>)U#qgfaDhRpu zxqOQBnKr71qt;@=Zj7u%dg4L zn4PSbv_Y(0+J#t1DV_BC%O)lZO!tb+^RM3(--=l=Bp=%PEBtnli0#Uw-nGa_imUMt zw`zzu! z`{r^R9}fm9QDk`e&f_U~)8(DO`N75LIv4ZPtxN8v7&2tA2YFay7a%GbL0xByJwa%c zfd}81TakN)j3Jc+O#?%h8G(9Q$R$o9z2`$T3QvEXf|PlUq#lMnKX z5`A_|ZqD^wrGSr1WgYaUGX#$cvEb0RMlAyLn_PPQmREJ!kCz`uxCDSC@1Eo%g0nPv zzw^V$V0y1R$qU`rNxk^IJ0OLK;n;NLZaQ!fi1fp8kjJdz+DD{HrbxR$oF3h*P+(nh znqUOog5q*ST2$($I(Fy#vYH4!@|YiG>`b0k8)bJ0?RnrXZfX@`XQf+YtF+q87N_yE zZvY&F{w=|IO!|X5gp#?@^n6`Y#nn-HKZA z0#?-X0e!Fhe;&@$BFu#bHtNBmXJy{V zWs}*d?4r8kL5M5IE~8gI#b>j-03V$7=t3tV(pI!b+8)mYAhGyAziyVXzxSvmI04a_ zv@TvlmhizvIT;rAUy=&WY;^7qNbzywfkhF9)&;Z!S*P@j4uh7&Wg6`#en(@&I;Q|C z5E+F*1&Jr6TxC|w2dWwM9KULP-4VY$Y1$J;+AAGfcl^HkmlI%@Rr?BN`R?~*3-z9D z(`u_`3UFTj$c@f5e{dl*a5W9WdK4a8B;~V3X@;M^dOKUXIQYh6_OD6O329buo5@HF z5J&htned|$qp@3>TanbW)h^f`E77wVZ}8ffFE?@MVz>7{JK2;3aUSoO5bOWqyriJC z_8=v;Hga@aXs!AaRwxuUR|a0Dk|rK|sX8UA9F~>#u$%{#L(Bk;j>s6C_o?JTjh?ha zP)F|}kMKCm)_1%oqri5Oc>tp&f>0?Vn14dS$%dctRAf80*z_n|RxiD9CicielbxJCdeHxRn7C4_J=poUI)3!X$cn4R76wHvlxu8>t_SB zsL7;>sE_PeWp9airgdLsU5KnFb7Dm-DC!HE4uEvOl|_1LV2nb4IK|7G=Wjbj!$n>n zrp{z|39*&T3wA$vOoTiU6!w3Uc?5;4X!;qk;6)4Jh{eWc*~A%KjOb9kMHESRdxu$x zF`9nggJX`Tf1mq#oIMQ^QB()dD+}{sXq@GDY*gN)?Z@GJD{LGlw1h0@-s7cBKdCB> zD=FS^aW-P~7|qRy@tp_6`+WQJHWsYk2KG=%7cn5_sX&}tC#Q~q+NHD1q=H49JB1#{ z3U)^ahyJm7BapWiXHVv^L(p-ea$1UL6EdB@vhGm@8FF&BAkGxx4A6MnuSypXkXM?% zG$dB*Fc%@rxKLg^3IPtRRfG|vaaib)*2mX=KY@LwQQG@t-cwwPHm4buR=s4jDK5mJ 
zSP(cF)t$Qk0wj->c%d0035?POr2OO<;l5OHpugA`0yBXQJ$8%##Ec5^?kd%n{s*S_ z@y~nSc6F8;l^rC*n2(wus#BdY_Xx}CH;c)}5>(!`wB0H@K*V9xGg4;v-jDW7J8Ib& zUTh4y{r8JbKDSOaoQPe7x=k1`L$XYi>)@$j*7j$m;9WuY6fb^n5E98i9!?T%gi5G# za(eU~Ol(dPR)((iqczUQ?ILcqsdnJxQtMPj7Zo=0fc=EAhp}tZ+wi;>8D<>YJ*(Yj zJlv8w^;#>RTj=gp-cBW6x)be7@}uFU+v)#9)_=!S{r~^tI686=jvbD1>>^Ueu{rik zR%RKM8OJ7@V`Ptzz4s2GWY03Pq7uiJ%1ns(-p}*%>e=)C`&}-ce>|U;%ah0BocsMY zuD9#;di%ZQ)jE<$+{+)HwM?X#*w$l}kekOhXLlSjyt7@v2hRXa# z7A+Ceeuz)dg{!nqi;sg}f%mZ`l>BL(G@wc?Z&j8F{IC}=f<~LtX z=~|QP7!_LUo+;b`>+5uw%~hwAv{tdtK0LLFxZwHHvVOYmcASsSOoGapN*QuM=K zSC*2+%^otN=R#@XUJ#WDc67%wOByXRvGf{Z7^k}2PR5CzQS03{<>c8Uj>T?^i_C3k z;fG4E`(nL}2~}!l%;M2J^PuFoty!FFEF@yyyx4PG{LXEDKm?CvC4Y5J7oXV(YuL97 zq*m`DLvPk9dptsYydt6E{n(xcZ|RKS1keE1))Gd(wtc>Z6ZcdZwQrgKox@zgr-Mhv z)Kajrkc%H-QOIjNVR=Ebibh<-U!sRT7#4@>4iVkTpXNthndt1k{^_!pqD_`uq^geq zim$bDRRWIc0jH4Zv)oFVLgYQOp5MLUM_mLR5+acMy-$8ncLgu$sM)gV!w*e{QXvWY zbA*T9*uLsYfOcF4r$>U79uqF%Uf&r3_3@|q;g9N;5P zLBAQdAgeg)n4iH}wpeptVXpAm$kN*0Qa@tEr582Iyz74ZiURDIg;|te=4mTXX460B zO`f@&W`JJt21KTam26IS{PFsg(BQo+`Wf#%nWZQI(J?K1Wo`TB9Qr}DF}k9Vl_}mO zT!*CSbtUYL=8SGmV&(C+0lMng+V-zrt<#D)K{XJNvd4$|P#mq9N4EmE=U*2Fp;~Nn z@(svU4mg5BBVfvKrIsV3&Sg&xGWs_`_$Lvfk4(TnAN4!PSJ28(^bZC)oUCa$L;R-H z!IYkmB9#p_!F>bLI6qm+a7t_2(>NFgPssV~bYl+?9~hIDqZa5_8&O7VfhH9@!`m38 z(iuJN_SHK^h_!p1+swyb(XUDlSA!PJ_1y@NI$;;_iZkU3%AEZ z7g=xRvYdJMGXytYEEghbIrf5S{I|zgXcafK7v*(t*pme zi(872zUs82lv`r67NJMkh+6i;yFXJ_jZzcr;}mktYgBH@9O&be|7{OBE+(6b zeuZ57o^@HW{iH}dPgj`Yvfge8Wf@b z^ty9D0;FXEq{{r-nAIsm4hB6n+bdy0Mf8Eh17tX=ieJaXG>*Qmk0 z{=X-642li0s2AQlk1)*fSW6#o4UX*UhSnz%(reRYQRruhB>H~ORcv_(9m|xq(QaAa zdWG}pxw9c$KqLO~rz|$8ZlV3;{Z*irC|ZaImNk5zma0tjYx?^<+5p7+k8KX=AsF91 z&AjD$r{wy&QQm{8vO28n`ID135)3Yu+z}iLcNBAtx_(9r7KFM0Dt?UVP`^Xu6X=r_ z6HRUQy{n${7NIC!j=%xw)aIV@Ih|9vCQdoRRUEt`OC47h69XNX>&DMa(<7rtjH*>A z=`}{iZYvBCT<=u(QB3c#t69!aP}E>&pAp}qqhOIj2H5EC@<0T;8*+Wt();K{)eS0J zmD_;Ocidp`kTT81)wpEy8=y#!t(jrFj&+nWw&`YcUFCmnPCMi;=^^=B7pT$m)_4H}su;OkzP>^~EyuQ5K&MBRiY_{iF-F1v%0Dndh`wWt$5 
zpG7)dHx&G~Y53gcspwME_BJq~an}5v_-`E&X+`40q|&~xeJOp^u(*G1CQU{TS{dlm8bjOa(i&qD3h_E2Z#k(_Man)_^-G>cKKZrNl5z`;3HpK zQkuEOZxTPbH=IfD)Ox&j1PcmReU5DAvdqAwl=ORv(M}GoC0E5AbG>~F+G}Zic!0ix zjf}3pII%hnymhA_8r1+C0zyRvyClAD;uPlYmK-GyqD8r`4!zEVD7VvR(H{yyL?CMI z)|i{gVhK}}QjUeiS$#CZ@<*n+%d?R_QZ0phdm9uoktf}jG2R2~Ln?#e?^LIMCP|ff zYh8AG@zLK{ajY+!emMyC<{t+^-FtJdY!bn#B$mj8nowLF z7pm3WO38%tJ*dc?&5=(5RN5-4Z!G$MQx!ixoeKXPlnIE&d`Y5};S9h_Ejs^!ex=z{ zy$Y+b;n#P#@af(~jzQ6Yd#`8qY&omTzdPg@I8IRDq<`)u*np9@efsJC+D<4lx$PJ@ z!`g5TGtQ4GTOR21-m{0rOpH>D#}_cX+InG%tMnJYM>0Vfp`6J4BcRrUZCeKcf`EZl*5>u}jWIu>TuegU4jjo-l`P}8{g zMksBi0@xdvhew*4)e2{{{r5Z}r7lWn8r>k-g z7btqAtUr+S1CEEytXIeHZX79_SwTbyB9K{0NsmA+OpwIdJua$f)jyU29}j86MarcM z-3_uQ{B4I@uUr<*pQD6Z^)y~KfiYNrYZxpE9v#^MKJy7d8(J?W8&>wu)y?yal`1^9 z;a)6p?JUxdd)?tawDHFJUc}4~qm``R-<5Q?U66^oi!8i zGcXpZrs;{=uNrM@aa&~3+(K=2ePPr|l}ai;sFwJh-74QAM(D+xpit&*-(pejG+%ma z0rVHqKFg~3T@&?*7`)7o#;tZ{2BfmS^Iw9gKI27?t6mU9` z+d37t;eB<1W}1%iGQVuSff9n$oZB5|$`;>EA4Xx6HS-j}U_RKj;bVTOpR;vG(%wLH zZSAo|W7yj0uV&zOd{&pBP3Drkec4GpAeDyh7_P0<%gBggC$9#b<9wc8DbR*m_K}Qh zNSXvV8AErZt$;JpSAsCPBe5;v;9{J6|nWM==+yliQYR0`CdoJp|3tPuY zxbK*`D7hnf=*7!ns!!y8$UjoGA+Ww`%cJrQAXx^Tibx+^4Ye`&|F6gmHc`iOc`*D; zXnfpbLXC%ZZ@PYvohD9c%Gf5Gwx??EO_zbj?f08biP4wVDM;^ST`6B=N9h!9;O|GE z`!goc4-t7wyzFN1d`p);p41n&put=nVpn9ZvJ>TJ`;8@)1*1Q`x|OSA6DzTs06;!n zHYp~=^OXR!(}V0gP`;GKUK10#fErIK3zq%Ytw6#0@DH^~vy=H+*P4Xz z{lSEzo#j+hrz=RsFBd4>Va$TBzCGtHc>hD0gt#KW?Q^jzxsR+K$7ulYYp|MjV&`-0 zQv-Mda^Uk_jsN)o=x7kvA^i@TX1dD~IeGx!=pHX!DD3%&=qE*?Ct)--WTv}RY_*q} zXziSV(PqfaS};iwa!m5+K1C22c|3}wLXx_CzzSrE0%xLL>iB`Shd2)*%lzE+W1dnD za;2<0<%sKh%smHTp?0mSY4L&fQ+1Y_=A8?+kx}~P*EEc5bJoANbL!kBFk^z2p8>_8 zRsGW`9I(IPH~h-P!)Hve_R#G&+i6A!Pth|rsb>yv!MLPg@|D5Y_xaPmzN!S%M(`|r z{_$JslG3~TtdY)Uda}Qcmal&sS1DAW9=Jy(b37bO#9}mEsrap&GuUs-4qj;4#N zRthVVBaF&#H%8FVDC#{mEGm6I_G#IG1qUQse^30kij)hZz6_@NGzmt+(g3jx>m#or z0me?v_Oy?kUVF&nmG4mR;Y}yuk#mc}HIeDM2lgO?*y!JHKgsb$eU|X3sD2ywPN`x; zJ3~tR5{ob|oh71;!b{g=Wt}XyEfq{#FX8NHMlzduzqLDkmJk|%3sh85^X(2x(0b6V 
zOJCipWrqU2+W=#nY{jcmcl<~dFkW}JoqX|#KTrR*z(CZ7-jM$Uk87JQO}NbX`V^Y8 z#7Y|@yR1FuT3L+>{dHiC!CP>fS3uCCIw?QlY%e#U#kQtuULuIkr_xc+ny~p7erJjW z1{@7&iG`60wq2*Im-h=Jc3T2m0b-ot!s%;nI@ZdV<2#oIfH)=m_KWy`qckDDBAwaA z)Ad|J9nW3G%j?JVX0QyAM~n&*4O>+OLDoFqi(5pO~kXH4_E+_Tc*)4Q$m#V@0y>NetYg{_! z#0eSJ-IynqD}!DI-cM18F!vEO6oKx z*s|uDi_NFq=O|9e3+y=gyxr{_c=9P@7Fc1Z0ByN=xBHzxpM)yV71zlv$)4g&)AA;+ zM+Z0BvOgsqU~K&bJ)W{OFeudj9e1)(XSui-!4kD3ywy21sNnqtl=9rMOe&W75+mtP zImqNO3Aad^IfaS}h*<$!vR++8pjiL5#FNITSX!-x-*>q2+wVl4)VA%}&pv84FLXnS zY+a#AdkLzUCFx>rv907Ew1)^E`tn9iTlb+Lr(kujgC&AC$hB*y=<>Q^rJMeDllEf?g?+9K zjYL#ZI3Z0dk{fE-`@grY=aAYJsyo;NH-smBVb3I`AO9$uuwRQ=3~_EJUU=v#BJ<;2 zs7rn3NTMO7pZXSc{VvX1ZAsn`T@_>Pdx=<|8-MTe^Tl5Zwmi445^%iVbn?!szMBf*9_UVNK{ZF7iGiksM zku#Cg?Ag>qyT6D78u&j_STOXI5`Y_DPfeYPhx^f7u(X^7gRHubo0SnSfI;xpk(Lj< zMY>E(0xTHF9=g6bJW|e9CVzT3w_^DCNyAlxK>v*{qQc3j*0@Mbg-dMCCJ!3V0DtSV z*MhYBmkBN>QQ|320{yZQs{`P^+XIXT@3o}rs;&aJv?s?<3YnDzom!{QjPW79#0Fcf zIdj0}VfW*+8rgBX*B0_^TRS?@{r-f_>*vOqzcg3=?VkA`dt7= zG<@qG!YtwRp?koQN3xyF2&v$$ln*KG&h& z*=66N&U#uQGdz$~u?6oQ+E*(ws-1?eoYUZ)H*%8^yjjzCZt9^AE0aWP$N^PIWNi+mX^6|X_SDfho00nY2z zYnsHFl81uRsB)}=;t=K;hq6{7 zXMG>_3C5zur;`^Ho+$R{Juo|Uay+Od95}>4c#$#)-uBo(CnE?Vm5v7~Oyp3JpgQR> z2kws-N2*004qrBhq736B zq18&jOcna&=6;;Gl8ky=fqdj83W_^-sO~N=V~Vi0#eC3#Jx<8*A$cpDh>Wp_eR8|w zeCvoe3m5AqzgE)LCUTz(-gT+&I^d}BJv-)9TRC3OnS>JPp}(okKK|s3joh9pPp&d; z*52okbn^xdEyX_3bh>30^1(-g$^>k3U5V@X#0hx+`|%dS=MSobF-o|XU1F6n`+Uy` zBF7)PTx8W3wW(NV;$e^Rl)oRM8cx7I$hDd^%Ni}S!Yusc7vge=%G||ZGAHp!U04}R z+2(5f((Bn<03a|xDo+*JUsU995?vzUg<3oA$@}V3q-C z@Y$^u(XgAumWRk&b!bs?H7GQmifSoGP`PF6U4H1=i~Ah-uV9^_=|E*f8dbq>*76Q9 zm+5fdqQ-3EYsVL!sx{Zfw{ThetQO^^$5H3^upf!K#^;>1HiT;;s!f_Ee7U0gW`D;V zFC1M#?wPUSQ~mi7K8B(VW(PA{b4MSgjS_Vx~!z&$+Qu6rwkU{8A_DN8ab990R{{ zChoQ=Q?2`GYA55mWk?h;S}-3PWCyFdErlOOp(%6Y*7-{dmrL^FNQQHo#8+NhWw3ak zu$>n7FS=yOilN^A*iL*})5Rcn&s#E%RWQ9@2VZ%Bxz3(D%{Nx!mKZ#iH(8Qi+M{~m>e&IU@k6ib>Y`LIR6wh_mOA$P%w0?w|ku8DSk z-)2LGfES60^6(g>zscjn%>%im$9g-2w5d$Oj=Y3>>wyJ30!T67jKmFPf{Qvn 
zi_(SVX@&+xFIc-6)R>8D?Dk$p7{)V+X@{9b&X!nK0q?p7HWdbVyrC_6li#%6Oj|{9 z1I=crfW~ZG>SD;BK`Wv1FqiGJ61)Y^Viyj#KARG%M8+Fx7+8dfuU$5~g||T6@)66e zVhNLahNfF@c<`26uS9d)fwNad|Ix$!0D@=ME5Ck&iN=riSHKxU>vix&FYECFrDFYZ z-HDy)JH-L7AJ5BL#B~;f`0$d3$4mKdhz-42k{e1D# zuRr8)2Ugnlb(0OP2M9u9;k7C2Y3G51319~Q_!y0Pz*~oz>l$9)cx{8X<_SjY z8TJ>BlM*{7g^}}!tILZxa%O2wU}X#TwO7AXEzqw?J8RE!{2L#rysZd2hI0ii-PS91 z!cWz?oy~Wb`b6jv-E(z#jS~eFi)O%oEk8;YeeYSt8(=N>-YF?XsHarQ2x8GN9<`qZLz}!Fw8J$i{_1sG@_)H{+htp?#NsMH0!b&4&_6qeGYF=~*r< zW;}5a>MQHb$UkBw5^!GG5H={MRI@DLL2oNi)&r6|DU0gjIX$}Jd17Y|R0qA{Bmd_u zX~EkX0nc8k8m>giNQVN_f$&EA zX?63+L9U-wWYK&pJ16_`vcfjB7*T2#TakLo7#%3y@%1~kcdLC!w0wc??>?Y2Eq(pU z2c2wQn$10cFRrV0jzV^HxIyXnBqk~e=nie`xh50D#B4^hZg_q(@(+{DSbQ<>_5z#p z3yc@+)f0j3tWuxKejw?J+GwU&ZMg2TO!WD^op!9WTjs2EG4|PPuaDHF|5mUhN?OmOZD3v!H z6y8#@|0}vd>D?Jk><3DZJ%Ch_C?fTwskDV{%O8Y5(%Xvi%TsRLe?Hv7EAalYf2!PeiMAV4UspV}v zB7!VVg9`n`p=2IYT{NcdW#D;$$NN00L$F(e`MBrO8`sr0S3*f>ud0WUAmZu-lV@3K z-DS?Udlh6fBU_^B1yfhwrpdA)$r*0wgXV*@S49H8y!&8PMXfUglA!0+)g9)#*Gt^> zdl30uNWUmwPC!Mfk=OiXBb~{T$IQMq!RZ2ioPi=Kuw+u~vtBt6p%5CkmKwwiBK?24 z(xBSaqojkh+vpP_>hyvrjv>^l%1 z7aOG9E@m;z|p3z8UhEG;a$8-C<9QA2@~;G zm@KD^G(g#GICLA>=!MO;&=M$BQL^*ejlHoz^+i-z5%4$Rr&>0iTpxd(kKGub>={k` zQ>i9&_7KaVdoOlTBBn=4FF+k>`3#?e$`E`V!q#CN)?nkzbB7rLkJWtJxbmm=^A|vF zvM5$hnTX0rzwsdH$CpWslHv4X?|Lf&%e}rLHLjukF-eSy-V<>dgPPah+dhL@|J%nV zQ?Tga8t`{_bMMf}N9|4L$tU%U-BOsmf_(gG_<09s0ulnga-Lkjy(e(l0JDUGt`#>- zCY+rkY5*G$Mnr!_w8jZ1F#NQPPa0r9?=-^qP4} z-|rFUz^|xGb5kKd5zL;;ma|4)L$Hp30eD`0sMYd`QUnXX5)|PKLTVb8U8%@1{l>4( zILqCw?3>P#5`-$wnLtPZiy$JH=Fn%wfp+RC_$_Y% zDeng@d2EZ*z(DX|>a|Q9aU==!X2ekFQ(?PFRn_CYt5PU;%yql0ctc1O;X1I@U)?l#BIR!~A8_)><1KnVNImJ3F0dxy|tjDRaK% zx457d1UXa3oB8N4Q~NfGmaj2VGgro6c%jCwWwhYA3-DOux_(E%r$dj&U#>s31(Hzd za&~&0(Lh<_sbA>nDY|qv^UBNF0d7c~z#@6P<3rb?Z3%5088i-8Z>Dj;nP7(PtjfB@`wR6pa7|HBe$G!B-X$?>;Q&#n-^L^4IjF>9-1i^nk(g}!lkeFfg}>LbIJp0`b#-{bD#USv8j(?X&^tKSsz_$XfeZ-osE2Nk-YcX!qg~D~zP~TimOr zSWwMnbeDH>b7EXW7q^Q+XD>Rh6W*3O@D|_kzOJuXeTEmE- z<*?ekSv!tJyr8d3XJbI0W*~79>pW5PB>WZ!KMUb|^jJ@*CPty`S9 
zcX+!J3C2UIFKDA>*c%x?&HJCMV1qBX9`8M%8fh9YWlDBSAA)RO{hr)|a5c{h^V2?kqWwwx@VmacJ=+iRjJgPtDHfZ&hO^DaE}( znEz7DaggJ*{rS}$zAz4(WkA7RE8}!h#&dB2wpCQvZNV_Wp?tR?t@pJa)FT`9dOdOf ziNl4&QtD_7H3fMXNC6t4a2MWA%4V7a8?Pin7~m|t_wkxJNZ@9#9vjg2L0`=`Ii#JD zh1u*dtfL(;M%MB%gnPj47y$rNgYogJ2AK#X3({MMZ~*)Vacu@nq5{915i>l;{pIZO zr{4PH&SYx2BPWo@@0#{>cMHeR*N$qdV04!yqNM6u-m@`7Ma`UVmqlL-fLteP*;Z0f zc?%qfni42n#jLbGrOU?-HvBpK;0m-QbQVVS zvq3HsA5-eaZgy8)J-(kzI_?5UnXW5?l<5sTeFlHH0MfoM^7fkCsQeq!z{Zd_z~Wrr zyLahQez2jf$^CV#Jm~0t0mmE6SA;@D)V-hIsNitN4+Y|e&lKW zksbaL=|DQ?fsXG0yY#NQ-~JYL?zq(Tp~+$9srZ>Jo{H=m!l~z+N#{QKVG}4WX3yjw zoDR8BfM}$WL;AN?!G=NVA{E%rz&J%+p-59bWxiKWuq0IHU9Am^xba-B@zgE^goWemow zsf2`tYB^FqKk~^%UzBp-5y|e|L|K?M*#oE^;x-(-A1Z7%#w6-kHggD8)s$CKwpB09 z1xncad7)r=*k>^P)?0roU{wnO7}Mh2%Sdz{iA9Mee(=tn78>Q4%LqHL_6(b26erKqiQWVYYby8PxI$j;H zCxVtc)0z!m3>LHlE#<(}0S}Rl&@2_Om7zin;B8!-oQh)L>4S)DeLc6|3nRSQo)Fnk zxP2=`j~x~&+l?w0U8*+`YAh}6_u_$o5|~&%<%IK3Kw=Aqa0m4P5-~T97d$YYa7ZE$ z2Kte=z%#h<>u-UPFFE>(Sh_NWIdjFUGZZr9JHO-jwi}d7~pO>OJ?Su6%jd{+cdb z{-%KxVzV8{Gfga~NgjcA)N@&Wrvve68*QM?+ z&_m=^PFCs1`jcXEn3zxC3?o1lkqnlHk+3|b5xi>_sBG;tpyDQ0E;KsOJGeHInerW^ zmme%o&IR+jUPt;q0E;yan!q_>9F233+H!W}#Hxd^Wv-Td!v6Oa71=FFGT0m$l{B-M z%)CYHrxJiyjcHBBjeei~H7y1)r>wjs?p~o53PcTi3>>jKYc`$msUEEQV$2A?*j0^- zoM=VY-(a`U%S|1p&trS5Zy$g?V9!0jo0ykngzqTpCyanHK*Ht<@})j4x1pS1s#<__@zWpgOIpNWGu+jg!tkgKi9#Z zQYW*t98@>}YU3dgD@$6-VJ2}q50O@N2^B{5R&5cNQhDJQx+oX@OA2`A5J)<@E+hV) z1DmiH-n&+0#o@CX@J*uvMPjlm1KkS?3->Aqp5wx)oF5b`K~8@R?63=Fc7C56R-RDS z@@QwFzcEsbu_dGk-n+}n+w~;k-8~G!GNa=m5gUvSAT}$ryUVUW1}Smiw8Z{x-iu%4 zGDf2fiB2F0D*%Ca%W)6Sz7!}5w#W+az6Gy_Hzez$#JEVH(uf;p`Q5w4S+dP=FAss(9^mg zKBDSYMLyqbCpG*|NT^ad%0Hm=CvjSk5`YCCujvlt*aI--`SR#0-QuX>9BET}J44rC z3zsOrDB!P|0@Ng5t!T**mPU$Ac9Y5W`(eOH6@28#!inl|_r9}pnsjdL7 z{(R@n(<{X)@hmT~)y54aFO{p_0mE93hKHm2^^`uHnhMjifZ3r28U>H#rKx`cV$eD) zNHP$Z>AIeE6B@sI7#mIisx%YUqi5m1j9b>|4L~|~_(d{f4bZP267g0%uy<8i3zY0u 
z-%|Kox3|n`>FT>iTHMyPJxZ*5e6#wL&1x652mDD0uECGBGDW9PE&t8(klCxBgrlxU{2gC$Z=aWjboKi2#{;MuQFNsiDnG&ZsjBDAD!idX1|u4A51H&yV^IU8PibAg@%RSNvy zS3sI4cd`(F^ikM+N}=t=8=oJ;JQrnE2{^H?1l(LuT_bG930Q3KW@RXu^)Wos7KezP zwyN$2Wxe!ndbvy)!*&(*>~p>UtCIFk1tFS+Cw#(;Xf&unh@vvP5-CbKL`A7o`9W}@ zjuwvpPfbKDhb_$wO#L!hqUP=0vetcL>mqWJO|d&A9BsrWKACXB2P*&Q{=7Hk#9kNg zA}RLQ#{>_)&dli8w%o0=JPjPc`=%3g@g(%s#eq(O%Rc=&tJGxOOmR<^J4FaeBWXVZ zTT{Zt<3R)B^%y%?O(H1`if>Ml23~#r6Z`_PH+L}jeP0i}7Q4f41fKbc`~IwQ@*czp z)0ZroIFaP&m+8n)O0(U+JY#LPz345r72xcL4L^ZxiAS1sUJxnLl-IUIzH@O#)w~7D zoG%K>Haxuju@M&7Ad)F`2%|jRZ3<5c`NQFc`H!y)u(sciS2zB}Px!ShzDyKnys`rW zmCd7@A=de*u$4;rP|k?lT$*bpQ@RGcLdq>Wb=ScLnEvl`Y*Ah5^i)*rDV6$(>a=|J ztM-@#=?cqSBP`-61px=$W7s0r-JSflLe8Lw!~3$ri^uSD?cN`$lN17@08ab5G`&Kg zv~W9!h%}Jjs``eZb2|0UWXLyVyy*&;*L?~UX1*?gCmVX z-@X>Dxz_E=bG5UoZ1n(l(R5srV)+YNv?O$brC9oS3iEuYF=0?fRGH2YgSHe>u4>IO zXqk|70yqEd8P;sF4;AI*Ri)1WJ-(~HNZ%4QJj<9(lk6mT7D}Y{<3q!9`&IRk4;$Yc zdjsn8`T{5`$h|n|S`A%J{Q)w?SVDfWU+sQH_86*ZztQg6O+)&C-z|JpAZfGVS&}j7 zKE(ba7n3wQMK||^#eRLUA4=(tAHrBOjy_bl-xDedjpr2(jnFz`0M2VoyBYjGfIf1% z&uA6nU~YByZLF;~Kx;>aB2UP+G<-z__<@G^ehD}}zGRJoB`H4wM-6EBY zf;xXk>@0!oZR#{097a!HZjVU?&0GI?PDgbw$;@ zv*DkVWd*z5%~-SB!m`X5`0dCA*H_bBGvT~_`*udpj^zk60=A51R*-;+0}!)U%bz{1?PHjh+e<_Z}+O&$pwjHm95#5 zp^2Awp;*>dsFBy-zeG-+3Q9Vo8@HwUqs$D^ZFyK?I%jB8fA_U50`&npt8F z3nqB7xJFy+hF2``qX@5L-GreVPL6R?9(R0)uy7^8EEKLV0MLy@IHu1lkzGkxm^rZF zjd%5R8xSgHWASQsp*NKoW5G(*4H4E54zoaD0fbK>D_hlt4^7K;kwWG599*)>ZvXX5 zJ!)mh0F9Ben~U>S>XF7H$ZJ43!3z+$nqpzdlVNJSClx zcMqxaup4LM78L)+6;*6Hv4u-W*n6O#<^WmG3zPmq>3a7PS+pdQc~M8vNF09MsMm6L zeU4@IuFq3nNN$*E5dgobV3&D4>}TEwnhuGFC`rwsKoKxKA`EQ+mGXqrWAHfF^x*61 z9k!=z0Gs^;MXq1#Jz$}T1Rp+UDF?@cg9?KTU;4!XG$f;u-9^%&=6tkv)QY`VYEEpI zM4mGLJ>%nS{`4~@S?kbu5nnmxBgnQ?r6RlQ1nC0dg}D@YF3N5T(Z;u#q8iDP49s9N2&ExC9u27s( z3kyOjV=nH9A@f)gqM{c=xZU@g;EE<14vj_&F3nqGZvVK@F~C>T+;VTZ{ddbljLtVE zD|W?ObG|w*j00CqUsSyu+x6v>#%p+c>`>~Wi*B5BbgTL|hNN;>xRPmSA#a!5%B|6+ z&>NwSrsLNHJ#JyQF{r)xP30V%i$Y}Y_HqB!UBqcd^(IkgN98MV| 
z6Qe0bBmTeCSp5*RBvGAU@vZ`M?4Z1(>utsvu3pv2+)@Vm1r35HYQUevYGSSa?>T1+ zhHB;tDFD4eH95)p{a<}iw8_A{ZXu(Vggwc?JnmxUm0uxkF|*`wBpYv4C`=4Pk(p(( z_S>k=5Y%w55YCsc?KCHa#tyFt+K9C!z z{=7(S8eCkKJ=b&{i5A0z+Hw21{+7}cPymV0up*ue`r4U*g5v=ghiq;s1`|}e)eSxf z>S4n-r4I6rL_e4#(p;pQBVrR?VvKM&2X>It-&q(K%4ek7mJW|3vl3>(+(DS4Wk-(a zFdH{<^L`O17bNq8ah$7%Q44@P%nu& z&R%B#%%io-Jp`YHS1J{!-KhI*2He7}0W4T|5eS+?-ZIaeNtZ$V4=PAvl)W*4oF#Qe zPC2m>7$f=E20^tQ+(L|ahoo>}m3Je5-QS^=;xHFzV04LO!odN6U^onYcPw2mFjtaS zb)rc7(XlGTAXmkdyfBhIHsEPqjo9W(&|Synew_@3MCKePjYP8`XI5fmYapc4gK zR1q|iq8xP$`Qz6+yWe5bKMl!;^xb%ouMqL3Gx!2?DP&Kdy%E0t77R*%_|;R>O9qG` zA5zs_0Nw8BsLH-`^246vX^d@Vg2TSp`e_VUTuetfUYHw6T_>8Q7kKe>XUre*=`df4d$ zcD(4>;pPKZtd+0mm0SXBpF^6(LAB2J5SG+|@nbLUTXil&nWr&VpGzIiU7#(B9aL>@ zslIi^lsqS-@LFkh=wI;oYA|rtwxdh{iESwGY@bzS&$s10s6)A1-H6nR)oH(&HEa^^ zV3sqaZQ_8CRFBVxRmN|2x7_plM>Vi&Smdb$*mgP0^%Em?{7=Ng;R?Y-pt@aQ_)qUm z$By2b6E|*p-1P1EytP>85@gZB7M5T?Rfgz`rk|;pE|VF4c#nXWA2S!rn-TO*eiF#U zuK>=kPJZIz)K6(yr0f6ATcjYkN4s2uV}J8n5D}%^?z7j6i_iT}JaH;4=9GxU7#Sqe zNIgy98sHF7t!&<;6_GAG`KC@lEq9r%n*bsDb9>=DxDYXi+nxG2pzbfq3&05-bPR=b zK902fl6^yvY{o-t4%!}yLI#aQc)yUl2mdPV55H`^olP2|o z!omkzbFH}UX`mCKT&Oeb5X(4JNyhZ?0#rrqPXn7^0gxE(%O+dT{{E#o6$`M2Vxrmk zp18;6Qvg$rj3?&hv3&v5HXKZu4X`;djotdn8J|@K0pJ7|aDywv&EpJf?_L92#a!VX zpf}O2G`v=BR9^sEHN@?czR-W)&L|!fkY7ixs3`t<2swgHN5%1HfNlE*uwHorPj@49 z)zWVihZ__|4g+!^CooMZdO%GuI5XSoX7d4XQC`ApfzbQ1cvN2-u%2I9UoT~1`Ww@F zL5VtW^lsVaKP9RHv01yH@jh}3pdDa|>rE8_6`Bh;WO@bkMckUt*xJEG<~P1U!A$sS z^(ujw^E^!!H5gXBud|1~8(fVHpNqxuh5)Rq1ZFLKGBPqX68wk%_RAJ}AYVL^*oBM! 
zd5oYYVq1X$hnf3;IAsFo1XF(Sb=QW5Z$NE0T%~>yK~$?VL>NpSEl%R+Pvz(5Z+d0} zX!|9T*3DL?uVS`r2X}pxz#eGA&$EAH2Ad4{w)!JjWdB1ZWzoiCUj|!)MVuD^vcg;= zN|Ru>(Eb{7@!}ic)?ZyC3Qcr;&yGjG44U&|pj7~Kdb*KypOc@D2H1d6Z$&FpVsm0E zQ?`#9IvAkXv!%`t}vGP4O3G25x(yzj2{%K|YUp*Ma2adTr zq0(09-@_*bTtzBwWg!v3;VIL7%%@!!;N2PBIPlBhYNKVJ+Uu{UopARL!%U{3vU zs}?xf*-?R_0%FjZ?Y8~6a6^Rz%`}1Rh&asW$0h{O>>NOnilAUl0>jQXfPujLD(KZT zX0jRZtim4BQ9L&3_LKedt2+yEfM4r>f{rK~UcKF~A75MnvoKxUt)$RiBs}Me9yc)w zNfJ<5+S-bMcVWx=1?b>u;0t;S&>I$Lsh~l{(>2E`z0$Zf^#6GmD)27q8FTc1{@_3F z0%$-~?Kbh4h={jfX{aD*2XR2!_iJ;H&}|;yKN?gTcIYLDC2X5ydkDX5gPGxSCgL@kj$jPq8GnQpN< zSF7u4(mF6_oqYM`4Dc600RcK?x__%&P74kKupuV_0IRy70xtnye|5lC>pG5G^70vz z^y9|+BN@}m`c)!RV9fXiaN#`yaI>X|Lc75aPx>TX#lQhG=1X?lb*esIw^i-D3enlH z2Y7grjQ?Y`T$xbtzbgSGuYf`ucQd4N!Ji+?<@lNK_Y9dSvw`adO6r2+Q!r^>dTPgT z^Wl_z-|NSRsgA}C6?j8gaRLgtSjmtE$|d%!O3+?XM!p3M?lh3uH|r5s!6GYlH!;H+ zGh?yH*dytGLW?&E{#dPvmODkEH+Ld^iwq;1z%OhZMNFHDj%S-GG?`}NdS$G znDquCg>)h7@Nb`*Z{fCm6QRoAKD|;HeSqU#lF;zxSx(RX6N@@0p_(~T#@yOj8ykk8 z2^D_I72g3k?R`YUyqdjJB-0kqYeTS|CZ!ZZMDqz)y#zVhl3M7ZG-rf z0;p>0i&zUB{;?;6j;KNG0G_fcm^dmbD%v_JBSWrtlpq`#mt4^3x~VE-JrKUuGA_6Y z;?7$F9wfo$2uMrNNG2_kNC=X~3nVtMC=#SJHnXjazh8w^3Yhy19(tXf`glm7s!UqY z`Pf5Sz<|j@sU85$SB=kk!a`yVaxC+9WGryqOa{?rXiWCJs>UEP!4?`Dp>QynBVDYD zt^?^JlXg(rndAeGvPe1dDe@7xGgOR>SAj|>5tL1)+p=g|+~x76$5q=R_r6{Gds_jA zxVI0;X9hvcbWc14Mqq;g9cMm?xGCVVIfYBP*Vy#Hu%epo&6+U;$9(Uz(D8v66jH{H zYasROb->uTIh>t6o+G-~`~7LX82A{@RqzfAK2^Ew=Fk6Rcr#_Z{N}RzaF<_=emi4JgCPMCIn@3II=%R!5Q? 
zAC*zJnL=MLkMg^(Jhi}evT^}L)*>*QO5(Z2;z3f6_wHul8WBhpRKcBs`|EGp>aP2H zB%YFc2@s2jgj<4P3XC3|x%T#Cve3}>>t}E~jfJXu#Wax~)5!SC!HduC9 z9?$?3vA%%;m26kXEiM={^R?~vlNy0h1f%2_!=CW3U%%dfl-C5+`PgVDZnKpwz>*D7 z#X3VT!I3&3+H%ld)lW!BfK_Y8#sbknrzEHae~kKA*ZxgtfOM?0cmy==&EYEoCXK29 z@G4k*532`Eo{gix{OP_T^didS8cj;&Vu7`W839_91&Nl^&W(EtIw~&Pw-*X8w0ysqp8laYUtN=Y>>&?GJ&frqt<|ax9}7AlV6L6M|5HS25L=9UfzjQQCr`#V^AZyiGj5R<6xzY6aS0H>(UW0pQG;x~8P{BhJ3q|k zD-d~zZhdMVyUlj-bU0B}n}( z9Jh`JzKkEw3i{qC{9r1KLx~X zW3sS;*mbZLH)H0Kk0@ZClsoA|drP{yBF!>ELYwLUBrL_~v%1-k0WB^`MS>SR7Fn_N zfT-%Wrm3}V=_>YfV_f0vcr5T^4kdm_49ZDX|5mH&_VV8u0rij>rf~ z>Yp7*X03xwaBI|=i@#6*;Q|!)#}?~!gMJ%HFGvyqNeaX=;|se> z^Kz$QmAim?DM2B$KK8Z_4F1qw;5`yA4=W8NadCmtX+H4WpO~EdJv@{D+j3v>SWR+J zA2BxIhE*?=n1Uh=P{QAU-lP~z>A_3Tt~oUx&AEC1M1TM*<2I7`mk-scN7)$yv!XY1 zLrXY)c)luFz%dsGV+PcC{*}a+$xo`O$&^tLOhm*L#3d{kQSs zoKAHRafHl_gUX1?UMG8$J&rxgOh$GJNn~V`8L7-Nl9@dllocYgl+3J#_}?FWQ@`K; z`8}8Gxvpn?&pDsZ`+cw1{kmWGe7=me`y<6jjpiXyuLTntrWkAL8&^I#P2YhA-*_jQ zQgsxP0S`wJm|QRg*<;reXBa|pDzV;f8$%-S61ky*Tqa>%=KCk~E1@1lzl24C=OO7W z1?oLw-@vC^BxZ7Xo4{kBeq28v$M)fc@vUIxM>$zp z>_civQt0aj!Hr5D5A`A1z1-L(GZa)w@_~awmQCm(kS*XfetLHMB7;ekOU}0+Kl0yE zR58Q4=Z7-3&*Q_gr*zb>>FUOZl-vtIrG*ocn}C&5*6&Hoas?FXRv}t&0F6hztwj8);aEAVDcaZj#W)n0tS04PG6ZGaxMrshU1w zA*;^f`lfXC%%M|)d_bv)1^p!Zk;;<30(0}tQd^E{7I^z(Z0);<_4aXmR9ku>q9NhR+zmxwc5gQsA75 z!SJioNTt@mWAp8#3_0&$BHBZTa>$fL4M4AA4|#dp*dPlofYKpYRnDSCmdR=XT%u6U zuvjpItTn(}3HCt6)VzD%De-J^A6^)A05i=7qI4+1oWPk}sF5bXz4t?v^zk09AT49TqHAF~|~G6f2sP;fO|KVHhItd-}^ z5Vs}M@FOFU>Ea%-2*V0!$Y2sGawy1f8{Y)QJ!ji3WTWx2*Z_RUlKC`E1UPKUFTlF6 z=jWehyzK1q{tU02bUP4~R3UHS7m&C-cl4M}PI37Z726HJjrq8>F9Y@y!DMv&cA`Q| zx4W`tAp|EGK*>k{^wOA_*aNjBb-9AiPG>t#rT+w=#DylXvWoHEy?dOzP8q?$@o#S) z;}!ULyl3Pfi8lqL85yVO=1Bgg8Q$c?mGl6Vf2*{6!_MdCw)xGK?=q@_L%yuC$vWk< z{pnhU>?vdpgI~#svTI4CAl%mFlx%J*5A9g)W+bu)g&`wKQ*QIs=_UMOaanS$P;MZu zeTB`P$4>pml=q=34Gz<7z>;I3g92_MlU`7w$H0mEH46+XAs!ZS4)k}IIsQ5aJSv-C za;9}UL&^`$NE?SKze+eoQ*_Fv&z6eH#kon#2@qdFSB_I<`bM>)pg*}&9~Hb^Wi z&Or)MB=(qz=I@=Wj3kl`zBF3L_af;;l2Czx2FXK&r^ 
z(8DAh5#D5(#4)s}Y`#nHQtI_OMZ4k)GI-2Kz7n#%DM%aBWUiU5oR)RDkxJ#U+~@0bEQb<> z|BZ1>3>8g^7XJQ)S1HGp_o30B>ZxliO>(=)wP>6bBFJ0aigOkF?KjE$4JOuG2H6B; z?OJ|L-72I!G_ll}41hfMos+hSCV_#ot1y5k&yXcl(%w0RI1z+`#87GTSsQ8OUg*v! zs$%5P&1How>d;5$h=YcJ{?#JQ*2OzK>+yeUnc7&Q<0s@ouj4Cx*Br#Id@Bac7&*Bw zOt}UQkdF&b;o*k4qdK6Etk?ZG54b=^}G4#BlWO;l?WO z4mF<1e0?_aCC^q=cS}o)-Hm=~ZViWy({2V$ofttH%ihCZ?zK&gw1a>>neg=ELLlkf zjixY(Iz<2M>uKAEkWQyzOrjzaVBunJGqafgGV4bevq{|izTy}ofVsWn$YGZ4&G5>L zH-G(Rz&jHBl~Xr##(&7M-Ep7kQt7*Ms?_ScTb0X15HFz^&w94)q9P8K=+Twi-EtL& zc9~iF?T+P8*;9ntCVEUG1v^VxY!~DEaR20e?QFCSikDmJyE7!XllaYaiK%`;l3d^1 zilM^tE7r<7wJZu!tkSEq)HZb<7CCWm#hW#r-K5&4Ggdp>Mnz?1Od4)n+x0C@UFlOg z4f}pRy!(0mFddx)^)s%g2D7#XBZfw6cH?4Oyg;7j9^Ksqa08G{!Oc>=HYlL)HBt2br=PCP< zBuuKJRxPyp)_T~oybNaG*#Lxa{3@!_jNN0OTTj#S zDDcCJRL|Mo`#BRL#TwWG!AwxleueO_d!|4U;(@!!cEDLqbf4A)Jz3xPm6?KX6<&+) zl|r~B$Zorc$tpMVxN$IOy{c_{brWMr-R@426mpLWb>Lj<5ys0?DUnTBaiF(VKCT%_;9dKO+mFVT30k2at&^T+qJZU&`lvnCM35pb5q_{ zLD6!Jr`5oYyS!LIYeg7dxkVdc2_duqm+YHZM$L>v~e1bx>Pq= z#5iI!PTH5+#PZr}|1vSj>PhhT8>wr!2y->FE+bX$4>?*&fTBI?DKCTMl*$b>=rnf` zptipF5ikV`6aBL0h^?)&gRPmZyn6GuG6;A}89)?f#qT1W0i3w-vb&WpA$vLxtbvpy zO1-R@6+uCUTLY~7*=6s?&|$xGXdSBoMe6EY#ZRp7r|he!*hT<_(4kYwlmIea)zji5x92+2Zdo}d(PV=sB-PEnj%?(AGH7N9)JI6_fOSbfPpcv zyLP0NOyZZ7DNDW9>&1>x@gF!t&iM62(=vt)E6XXYlRkVXyYVMwVx&XQHwmAEUWF=b`$JJZ>G_P>63TT>7*tPJaNl>ycd+D&Y#j#o!x`xa(Cmvb$ADwI1DTbHX0=^t$L8nW4sER+i^!V+|BW)2eMAxnz1H}UAod4G$^D5xw ziWlGaFF6ELWCFVf#$28`(jncCMwot!#t z5e%qwkxOPYhe|A$54iTEGP+r=&yE^nbdKTi6SHqjup)I0Qv*u9xQI-mx@$?1Gp{u$>{W_^#2*wZ6QHv#KbGF_!)alvv_ z6GJXyO?$(-N9wn4j?}puE3};6epQ7@V&A&R6ot9dRv-B~`7q7>De`GS34yGBN;D*! 
z{Po*gHLSO%fHU#@^=vS@+dnSg2FYRDsUi0n8(1MZds0d6>L5rpIGlIbIaRC&OEbc)ggMPo(x~$M&DFJ@v~CC9SSu~8Hwzm zexd0=hyTd0+@OZU=bkX@ihn;$dE6e;FhLKL*D9WwSE<|(Av~_s0^sx&#MHyUb;d?j z4P@-&0rtUDE~KXik`m8N=+3CT^PI;yCh}@OvC}%I(TF4~8j!5ai!Cw<{Y_S) z10qRa_E)jE2m~eu9+~p|2i~P-^$SaLi(W>iL=F~!znm(Ip0-tP@CA2obV*%|{clkT zzTcad7?e-~(~Espre`v*_T=|-s>WN`33W3fbxZWYpReEK*9n?0h$%e*RH_6i@teKjR3p8%|T=fyUkBcLmVzteWh~) zz^*JL0=4`bfmn%Cu?YVfghdR%Xd!9T>H$d4er3S;t86ST_PE}09{}Q=R70Afr0`}! zx~`qYmd(?Xt>L9W0_#i@D}#hysnoVx-318@cag97m!%C)pNbWQLeK)*d?{z|oKygv zw}L7rEyJf4jX|+E6>MtkL@}Hti;@MvRXuml>^UQtpU7vdipK{$S$rL+@X&i0yuB#W zCh3H4_~%cUGr&bzH%Y;64Z83-z)jaKG7?&T5hKUY_xJ?V-sQ*WDN^CNEz!JF#mo94!U$M+?LmCTsOZhV}V0q z3~hxE&`Z^R_#3!8jH05VVi11@^QF%&K%YU6=l##0Kh>ae#@s?ak_mYsHviW)qMdvisYyJaFS;2C09;C{;q2fo%pb5`ey zzyF<@H}43(eyHJMK_ybz?c>X1%;J}$8yfIM zW|6m^?1Vfv%8KJ2;FMu~qx74RM*xAJ=k)uTM#^-IL{bQTtVU$Sk^l3=9I*m=1(4M6 zfrvspfbqH4^W?*@Jr6aGfTqdEC+@nqC1sRrfp!wi003*YfMyQfr>Yxrli+_i#N^`! zC$sw}8Q%aCL(}hn{dAl9;ZX%W7n`m$L%EsgvD>eP(NH87R56|~t;W&un?!@Kiwfij zLP^$eON*ququ`=8oEe867=qk1egQ+g5Aqd8_Zi-zcIxc^adcaj0eA1_&L<%shv#4~p?jDR4H?!Wjj__K%XE`Z720a+EKK?Kb; z_P3AClb~Oz3P}>^`_QN?URi+}4XtOd6-k@bkI4)k)Vzx)w(qYl&p8_tG!g$^tb9(0 zM3RbF@^X{EMRQ1GPwpE>#7KZyKz&49l1i#B46RIr{sf`||2PKR5*>;%&FG08Tkcsz zQnE8$qN3Vp>3OunmrQ|<58g9f%wn#S!QhQrsnqG2n0TQzZP0FAcjf!1%jH{n4OGMN z4d8-ofDah7O6pQEW!Vd^9|!wT&K6kox(79L1wP~iw8B1Y`PW}>VPT0v>;jn`mCH>P za^xahZtB;rbg{Gt!sEB^-wnN2nT$+OfYBr1*bo2~;GY%00tr47a$HC^^NE&0!<&); zF7XWvn0-pQ<-8xLlj4Wo09;bX}n$mb$}T3zZP%-H_t+66%1Lw6HwCC zpIsx73u#m=sp#4yq?$~ZSWo*~O`52W11@k9SjIFA=N1$((ZDwH`d(5gx5&1X3#uhq zy*Vn)0)C5kRG|iCQM35)JfZZ;cQ=m;q>qdkB)}hucO`uY0=H%;Ab{yW()Ufb^4o>K zk+N7g5H{DT!5BI^>j{?^M?N)&7e}f;iDbTm2i&74K@fcH=gDqza1)bJB}-d}oTVVA zxl$4H80q#SO-|r~aJq{a*=2|w!YUsC3#7h&^7cQ#Jd1k6w6i2|RgeauTt0ZzW^O*J znhp5|V$($uk+_)@?%WDco2a)OJmGXW2_KBn0p(LzYvKs+N zBY+v=)01Xi_lUpuVMzoX{qRy}&wWHG04Y*Np7|MEO%vC9p1@b2PRB1i?@tP@TH}#< zJwHsZBtW_X>=IiWILefg}zTvexFM@k}1N5r{mD0tkvg!q?r|z>et@v zhYbu3#qb@!jIa&BQpFj8MPUr%+tDzVsfOq+AVT^zrWa$3DT5Y!zV=fm!{y~= 
ztt5$FPVcd_(staqGo0?I)E-IczYexCrs2aNfnMUm%ROCfu%=vtF8S!bAkzCzILf33 zu!NjTV|8w8vwBsoQz3xnKZo_zd&QVx$H`t4(t5t#$R$O3Q6aQa`0&hhXdh3?9r64k?oaB-ZirL0S>G4&J)Q+^Zi=r-K zj|f%TU5ZLCfXscbiV;q{2qok$<~w$!w0)h^!c8OuCbNJ7!H#IvRKM%}@hwzi_{nAA z;f=oW4i(7Vfycwh=Jk)8d#^$?QIe|~IY|W0yYB{<+j2QFK>2AFsv*ZVJU1(1=n6se}4Jg-`GmbCGx@lz)xeWPQEtSMz6)+zr1nPv}=tpasu#olWc z`2sS_Uw^v`z@ zk+ftszyx64_v&#?xdcm{1cp{=XjwF;MJ_8j6-cojs)rYdpwxyno<$)O&3qpg7a#L0 z(WVsle{N{>r6Rs!TXGI42e9jABr;bW`5k!FGB|C)I`ns+zn*4^9s0D2^76_yHaVmU zlq@kdR8+vLLqs}A5SvBnwkoGv|1(CEE{$X|8(!zdut>T)5~jEnlt@8K}|7=DKBcr6LfPBDM>s9HH}<}??7aD z{d@&UYH;Jn!46CnVqu%i?$e!MB|2mGD}?-y1JbA4vV3`%WN9M_Nh=^nt+hBbm;I0m z|0e6q^?iAMo-ndL>$JJ`b5ZCaN7EXV?+f;(7p^Hcc%q~IJV1958E!Xx=38E9Qq|Kq zp6>NN2KIIgMv?3XTs7JSZYUR%@xnh(Dj7wTW@l+>$$*joc|P(iMFO+iLP<YPX@^JvQC&A1Af!sW)2)AK=1Td)K|aR_unVps5CghqZwUP z6#Iuh?2UoQYx^>p4K`W5HaPv!c{*>>0R_H04jK;15Qybo!&0myq_g)(`p^d0+fqDQ z6ITc?x)f1D?otGv(4F6a$T{fu6@w8|0olT$P=lbsx;C}@Exgigf6>l97yDLgWs?kv~AOSDAh4zxn`!tnrwMyMJbR`v{D=vRm8gR z1cKV0j%LR(2q*7-W|%#yH-H9_z5ocWPLjm`^Zklo1MZy%$6spQPJys#Y+fd7LHjPd zYSrx0a6H8+k*-KnipW}5&~IcG*X@=I)_Zs^Ro}gx=9qR1|6x&etP3OYmHjgy!kTq` z|5_t;a64p+D)6NrZqS)UL$Odm2NFi*Ca+=F*7tuW37NQ6VRKH zfo*KWcbGJvZYW2&HRLdq8t zwT7Xg(2-{kH8K+)oJ&=GU+W`ulY>RF1ThV~EJ6`@9b9$oug6iAM-lAUZQml;C`{x8 znfMN}imuNM>1e}zZ|Jj$tU2{@sKT*SR9Q3AAne}vIdsGoD8pYt-cVIjGdf2#LA6<8 z=d0z$A$KON0`Cm>Z2u`hW2WTcOSxz^p+US&7)xl`B_j zEsxG|ka+JU4%TiyQwy%?W?2cerLmNhcPTbNe7lRMtx+Aa0B=OC_r27$x!w5|==kBs zfmC%8#yY|v^|1}FudjasELXY|q^1@X>ZYrp4Cr+Ql8oDA3klLnCzKEKl{H*9N+)Xh z1>?Hg_m6wTB$;KGy_t>cUXJ-+X%)l@LjBcer|n+jnXO$afy4wpsfhCBNMyHaLZfJEYo4f2E)cXub^FN*e znLoYmw-$jFav}e3;W=X6`*jdk3k_@Zg)a@`pnQ!3E|3}w#ynB=fQre~`}D#s^rR9| z6wfrhl>5hsD_h@ZW?O;;Vx{~%p)QZ=gXHGw{_%$@4I${Q%@vI*-?jXYiWh+#e(T47 z^ajUU_Mv|w0}@ZN+Zl3*+t&rRFXeGz@Wh{A06--Q1w5vBs7nj(orjzz7FL2H zkpdZWTl>-<2VPdH(d-*7RvB1&ANft5D8syF;W47|m$F@NZp&;DpwWFF84qZfLf|*} z98ReE*xA`vCwk#G>ILwc*GW<#nK2Ac0?y9p%kmFZa#F+MM+7}0vj`bh_IHMr=}r!S z=93@{obWxULR1UaC=D)MJJj0s-y@z0wGUCy@A91EzW`*)i#95 
zT87m;l+(&Z`b7*-&szJ6kz$v8kwABqRO~NbNdk+1uUOdve(OOOZLp@CQ*c>{>pev% z9+q>KngrzpKp-{}nS%pX;N4@yn$sx$Q93j4Ga^u02HvDbn0(gl5n4BwAcy||n=;<+ zR|oYG>jlM(({Kx&5r;dk(?Idp;WqRlaX*RA_<@2I>+923*7Q0jX1L+9J=9!TRsK#Q zLRsmz@kqZfQtTQ4Vs|(Yp2l3Ho^AnCLUx8AFx$=FK46C=-l34 z5y;t0_eXao`QyoTEOZ*$q;U$G|@NL5l!f9_q3 z&IC~!Dt!MeLMY_Y$l#^sI;&YR0I@CyK*O(Zl#W#88A$Syc=Hp-)lg;i!6+{sCK?$X zAdQ5}>-B!{6t)=;y3#lxbI1Fx&8mRb%6*V9c{%#2zszy5 ztNV_G!POKYH5hB++<%ZI9>SaIZ5FB3H1!P0s%H}}tGID_BE39#<#|<%*S>$CJD2qa z(nMs41x!6W599q5p4uDH!eJ34pF{sGza;62Lj_^vAs*DupF@WEq4}UpJgk{eH9B%+ zStV6J1J)98zoQycK(QziO=Jx!LOBatg+L+59>tminp=I%zAqM{1Ab!U6%+uAKZh1$ z0wK-wNDlN-O5x3Y0N<7y<9S}|c!SM&chq5urkhzB5 zzJ1$ce^(4rb27t2n~9vCgJkA+i5oDAVQn7;0sj7>N+^oYoS&8S`Kk<<`-gJH$m#wN z^7)Y9sd9lp^N-z0K9-hlbAy5qG(z<)V3e#%f0X>dft*iv6IC;>Sv1B0Fen9@hM3P} z@U5>14;CV7-gsz`;Gt=seaN?C;oCD#Ga}H+p8^}kBEAYYy{WWnC4~?SO+BQYag!`! z92sbc@bdzT3?dJjO1)Hf455Aj^6OY9E{rXvCO&i{u$j3(SBvl&LjFmQSzYq1kT1+t zjK3Sdz2Kr&pU_)g+chI)R*Jv^gU8RIYjhsAHBta%e-<=7v#S_;1tX$nK*j-KXz=`| z;C^3jKhpa4t$}+V4I~VJie*3I-}zQiMr7#b&6_ty$|Ox3ZX&XF@~-z`mU@t6e>>>Z zj&nk);@c@-lDcb3>$m_H2VQ=ym?zh^w`LvWX|&_Ux-xV{9V94|h5f4m#7h9?C+XJ# zL~7SPk^PA{&>KDR4T7-&*Z@qu-&=k}vQ>TY88-}D*?@OqgpX#?fEd)SK_vkKC%?_s z!T|KAV#=1kEl-ug=)(Sd?R%m`Waz=Qm&NUsV30*VYS(_P8NAZje-mISW}(|0Om&Bt z=UrJ`;@SU0XFWulK2y9&uVVDMfEb84a$3@IAnE<*u3RKx_2Q6KJqQEI#ud)}0I-Q& zg&h@7&o>3WyNL${Q~fDGN^V6QkuhslNwGAd^@{vbXa4A{j&yL&zBh zEmoPbM?q(^-s9&}Uejt_+U*|&Cct%NMif30fp-ohJVQvj7P!M&+Ybi1et=Z$+iSKv zl0L|cr8_{&%VIN2$_=VWfom@;M!OqC)u`|bZJwZ|wzP|AYjYM(eGb+m-nIj}b)1OR#w=LezgPcCN5 z1s(Lm%*;jX)=;zOK*)PJ`t7C7_cJ_{p*&3u^LyXYVIr_ z+z_2spv`D&9Zt(fz_pb<2z%ywDzI|l#0+4=67T`3+@Uvbiy$1=&y$fN+6zz|{VzUb z*B=PPD4|;E_5#IzgWuJCan8yzn*d}#+gPAA=)lXzJ-dtJ{>U~ zk()bxl=UbE6Lf1xNait(_ErZtt7-D>p&Q<9Gg8+0p9>3BFCiB)5hR$82c{ zWvf`2J3p)#xl#snq~j?x`bF~NMCd~`S{0#~Q2a@%=<(#U&+?vz24H^w;c&!OZ=|-q zO#$H_W>uOeT|ph&bHufs)5cHNo~nZcmFd5J2Ubj4KLtl)4C;SE_B}1mRUc@1Aoa2J zlv#Z(=bp__9fGmBLzEZ^zWOO7CWFAt(8;!9!Kp`_E6 zzS=Z2#8jc2tc_Y5*Z+HXcm>l~VqW_CAG17&TnM0-J}ebq+88fA*+qTTgXwMTpaut4*lenwok5Bj3 
zp?Piuh2>lbqjNE^cAu9DR#h7nQ3b}sHwtprm`2jV;qUx)oI^F1#zf?3NUg;q2<)xV zlGu!=?V`D3?ECW*!i5h?E(I`q^l6cdO6y7ckWkjd{PYCK zXdU2#;V|wDJL|RxS2d+7E$&P81?SnLkE3y-4^%Y@(s%Dy3|ldb)WZS(bOK6=Fifg6 z3$4Pww-N>+$K#&XHoht}DyLB`c%(x^{??&R2XOteYrnpjKQ~IZW4DZA0N65;lS=DvnQcTPtgDyh~vQXf=bEOP+6V1 zYb(u`>Si0{CBsr1BlieVDg$F-BWcjXdj}o!IT+_&`4$Ncud?uIFrO9Mz+1>w#O)y_ ziRyeE?yzf{cv(sq)PE?o+(NBbnhZO9U%+Of&*A4z#^uBRAExsS$HG2T!b*c~B8`q9 zxQ>J7WHiTZZZf$kU*XRw3&nGhY=VaHI>_1$E9pEOzNz!zu&WZ)5vZ}$>6ki;AF(68 zr&AUmWL>(NGtBnK6H)r0pk^rs?$8iG(sO+}8s#8FP{pYay&Mj6FFj2(_6EL#5LtYC zDb#UI&%pGuYd=FeQIIeZu_cLPK-ig&HxbgOxK?L-jK3 zIw*zNXjVR+wY_vE7PjdgiF&!3jn82jjImy?2XaWP#$cB(^=rke{y1&?aq47%!b^$| z80Jwi02(l@!gHubFk$gPxfD0a91pmFgZxE1n(lFN=`lfXo65KI9%huX$%W*czwbPj zb-n!3+pfXjNlhvw9@`)en%+`TAdcb*%gDX;9Y#I@J1Dkb zt+!QyH32i;0|a=iamCeVrdNl~vlx_vJJG+VNX{r=5peB9iJRnfw463c_;AV|w6fU~ zpep!HhH=jU)V&_j2gsrJTpN?cV>KiOOM~rKjqT45+gG< zY;(}#E<0+OwZWapU<-z_S`opJR0*#~km`>XFYykS((Qd1yJh>7$k+Sm5TQ)q2~x z)VZg|m&$>0)CU-}HPG=i=h`A<`+kBU#|A;b%lV`zB)c2%NYSC>BCv=B>YsG~rN;gG zK;WuKll9h}Ydg9cJ?`buH*};%&F+X9_+xiFcz2iwg65 zctg6C^T!G#HK80)#a-#*=SADNOU{h>Y?vCaw9*L!ckU+y;3C`Z446)o(K!4d$1=?C zQtn5%3wJocK-_CrcRe;S)i>(E=!BF(7Z^HoGpL3fsk*ffMtVoj>mb*H0Zkbk`uO?2 z#&aU0)3kOi1CB6_$?P{*q7TdH_UNy|eErktKoCo~uV|G5SehS`a??>jYGwjvUNdH^ zuoCjGZH%(OZHa>XzZ+`qX<19k*p0XKBh;9L$8|sQi_D%4XYLtc1Oi$)h#Jb5HRdNE z)?-5rwlsv+e+bF$Z&OxgMzQg%R^we#1s91 z7XL-)K4dgp4`!*9r8}cGBVZ5GUGFbGx{Aubvy)7lj=AOCOn5 zciiCruz(Q~rNWFG`FBfgRE~>Uw;kYEXEFRh6;`Y-Z%2BgenR*a&wj&4-@6t^s(K-K zF0fDeZ;V~~@FKze#RWrAt|sQ{?ks`}`iuzBKsT_Nr zQ+!{aUfmBoBMv|GzQs@NEWGK>O<)Oc4kCiUJS*liyw2%+PNj{E|l;fEa=nz z@_()_9&JSu%^JYl*RqvuF)v?wm^K$Gh%VhG;utuf4ti9j0yHy*fW~5dP-e5ZK6Wne zOmoPVf=F z1P!%}16-J}3F#&yS~4%;)Ismq*U31MkQB(mAZlZoPDP$Q^85Pk_{VK5YgI$(eYK0w zg7us)f9w0QZ9j|mYY37vAnn^ft4{8x78G}9(vBLJ{bkj8O!Y)GlC}S;cqh(7<&^~tn2^B6&$iYiW_-p?;c*v#EYKN^n zsk$Q%n*yY$_p@4gtOo}AFVe`Y`-Epg z7l-?zB(}V3W{zwZqxO>GXwm(`k$6l@pi9REtV+m9ozO3$Fof<1;pl?&gN<8AQqqHz z_Loib_w6PnHX?zP-F!A2xs|A5#WlTC2oQky 
z)fvJU)!lfjB0D}`Vm%)zXe}u>58KD)JNI-+uAJ?5Fa~w~7r-cqeWLq{GH{xcl9#cQ zzZL-XceVC$wgtdq_jte<*`b@Sd(7(|Gi)vd^^T*xGq3k&pBV>IcL|VekbHh*cFls6 zwiJiePMp1-^{^Yc{A5V!(G%yeV+!zJgyN&98uFK+_C1AFh{k1u7kjRnZoTTkMiqK6 z5Q@)p*O7RxObhxAoAsB|rC%pNS2s`e(f*)A%k|cc@1dW9)Lx~!y#%#(YIV^wg3M*| zI!B=V1%?2YiMr&iuI+B_Z^DL;O$Mhh43XU)aqVQ0d-hXYCToXZ%g~9^1rM2hE`Sh$ z^KJ6I2<%HFz;&xfDlpy&JC%yVJuQcpY8k|n_fg`rM`b9}b+O_2TC|c;cZ1l)ISrCX)~ilcWID@H%k@)C-o({+)6X@o8&Y(j2$fNmBVqwhfT z$q5R4mV#x_wA$EiFMzy^t}{*M;Ku%CNuXHSOZjCao==+fFDD9DDzpGJeX{%D<( zxjqZR%Ns2xD^22T6jSww&X~*`0$0z6q*vj!k9c-toWM?cRpT{ZiZ~w@pJ}Quuu&L> zNnSv%dT%h2q&Ga@E+5O9x(WOMQ#;%c^<&3@!cN*Vh-9H>do-JVo|S?G;A;sb(t*z6 zZQswUB~_1mgL@o)T))MzG0Y*%a`oGUeyz8w49MA7KTP*bkx@g|`!)(Lw0OvZ{f!`r zzsTn=!@KrJat49X_zB}7DzPMmOfaaYU!kp)ogDm0R`28u5{s(!9<>ugKr^<7U`}=SD8_{gE1iMqv!NF1u@gXENfp1$Y+?ez7-QJCl!SKa?d@e@i0fS9trLll@g?>(mD@N&Y=1c*iv#9^t_2n>2T zB3lQMwA=Nuiu@e&@ zTJ26aeQUt-ZHAJiO!LaXb;}1sz8iWuIR7sKA%!|}Whu}V1V&ya@|!xq4z|_gNWue< z(LU-+qaO(-wXViW5WiaPQ+5u5G`!{Mb`Wh_)W5x~%zB;715_>ME2nN(_-!p8gt&fL zhFug0t39y8-TaCm|Arjs2Nk+ZH0gu5Vu4P@BM-6fHh@yNqqMa25dS_t4)!bQ?fD~T z*F(fovo0{wYJpEJ2H)T}QUv(qL&JwSpOt<|DrIpLL3|7H$8|7n#!ef!K-Pd1l!BAw zt0B@_heW`e;rJc zg&jPuU_^PaF=k6i6X&A|@C}mN>lgWY6o5>7;iS&AwDu|Z!Yvtah}Dd{zDk}o1oVz1 zU>!8$z~ha=zNxmzGVp%y`1Crso@XJr1{2z)tnGfvtGy=n03dk01J9SppcF(Ydm;jo zL!?H&@BX=2Oo^2xh{NjCGJ}T1{OJOtq7?3%jYTQbXPy%>-2L--AW+L+hqJd^z3%Y$ z?5R*Pn@iULIdgvsRs%HP3=my=$6vc7%P8t%k>cnfpP%mlKCLAXKWIbv1*@aa?h9{+ zQvV%NTRnQapZXou#!!iMyQ)is-1c-{137*mqaAHXCT7nK17CO>!=c-2pR#ZE>&<8 zi6(eAs)nBBB=c+KiyLz#FbP#jsI+X}mn!XF4Jj*kZ)C!Fvb{~!7BTiZIsM6&y|%)s zCyhe;2MY{Nc-@x)Oa@5>JpqdJEi|_hwpz~L!Q}Fk!8aUVuO8<8&sVA_6Y<~*V~SN9 zt-=&6V3We8(7nBHETK{B3GntHXuUZBpa7LY4Pd#7#IyeF(P*?)^KlTE8#>i*JJ1tz zIEtW#ZNHs-^$z{bSRzYVVN2Kv;W_H@b_ljJOJ&$9k~Xo^M%ZTfJRn0hqi&$B5vzy4 z3s@!(I^^B#{NeTz^HM{mM&@;6bG27P%8Yh^i{8VQjJZwUSn7ra>BROtZEy z`i?JxfSw$9Dihv5*h1B^*pyA;^}J@G`o+G3ENQslz60R+Ut(E9z5pES1S?RaA5l&v z;7;u?^)57coFVmXTu#64iWp!7iIBmO#d`=H(O(MG}Lg`_=d!%5Ay=MU(s*k1tO($docT 
zBt?|>1&3fzE*=l$pYB;^XIW~y9ewDL@zb_g5yVzNx7cHRh_I9t5 z)kDjeL!{vfXvLN`fCg&rTSHDDWk%;bK){y{wv@)~wE-ThiHkgrl{RN5rNcuo2pNoO z@nwctu1u?~31`2F8+1mmjLHigllZ0AbLHcRT(5z&lRZp9m-9TToYX+M4L$+=%UJH4 zVdTAMK2e;{in>sruMfYZN&~EKVI$CCw>L+*AH}=bNg6(`+>=CT6kK1eY=u6l0Utrq zswn6~4+2{Ppx}B28?H=)q_u-|fgqLnjcf1JE9e7CJ>p1=LYh`b06i)%>k@y_e#h)# zrn3U4sN%XJJ5&3AbzKH}Ux6NJH))GJx-Fc)x`UF*$5UA{%E=(#3Vsz8QYlz5XV321 zA`??!1Zh-SdSw(r7;8%&-VPFMzE_?mDMl1ir-)PlU$3)tdA#~eCN?mh70+c&(H05J zfx!o_pp0n=dkX8RQen8MW(=lXo~H<3bbwqVApx{|IV*8i9KB?4-(VM(OV)C{j{5S< z8OUNb?`Sq5mqCv@7)Ft?DolCyGr3E z4*p(Ta-$GMl9Sct3EB2+1RsL@M>ZguB0|Qd3h7n)n^?+CApzUixo${-C&3PjcQL88 zi+PnPGv-{othgvKxq-ds+7J+^pWmm&a#LqQD0}D{0{GxBBZ#H*Snh@0n{!3c&gR5-1|6yh<}Z(u%88(G;l(;3%J9dS(Gn%^^)pAW)eG!(yFHWhh2 zyT9QxNp#i^;Dasil_{yDq&lTzWi7D|iLJG0>FWdgRcv>n{`g}IT1}~?9j--H!j#Zk zlc@o><8N@qkELs%^d99i`|a`mM>Z0SvC(tMIaWzH;xios0;^)(nxm`9)`colZ3qDn zRQR-ljS7&ECLe<@zPt(a#fuIOoQ4TpS$5zh>3Fmape?OxU`HUqB-L>6(YDDT*;~fSk$-_2Xs{#rqbn+}clV#2ra+coH-cQP!;$Wk^GT+@c>rDU zEzppZFqOC!0!(ehPH!mM#$2v`^uKtIyiSPVaI4O6=ac;Ra-*N`)JrcC`OAShHw22_ zPCyE9ZKpEHe^>4}V`UAsZXu)-uJuv>#|J@=4-X#g^MZEr=2p z=K(dUfZU}eQjwD3HgxQ|H6uGh2MIZLT0BNM$|H~NQSV|Z$W#6QSQL0tECG;d;Dz@9 zygTt2D4*6NM`0`Z^Wln1i~5n9g*&FUdr8<`Ea3GXe5t&+`{qIg0=>zipm!PSF*o?; zc_N?VxemqA$`X@Yj*;#B6ijHt7Re@{(_2by<%0%-ND$eHBM6bAhUClU?q?6Fvc3fn|@Sl}HLhqG4JluQSuR$tH6=5&Dx@4Og$c;T}b zc7Kwx9cr19DV&Dq0t^S10E99G3@01hO@#7lCB(Iwp7lH&KZ(WjBEM@MD#8*Z35BZA z21#IljcxzRtapRix7OQ8WB4FxCK>1EOP(MOE2R9{?127OJuSa>3x&+4%WV}uX!;St zu$;U=ob>F|Qld{i0Tk#ZgPnK+KdFNkuGJmR2OGPIsr)UxB)T2cVh|F>e3ZmOF=TSa zP`~@TvP3Bh5fYGW!WUI38%OyCp1J}iD2Bbp=1WG$7M?P{s{onW8k1m?r%reH%xVXb zQ5=oLR$D88GomSJ>EBT@4%tkU!xa5aaw^N)(t6LauUK|3nGsCD|60UEV`Bc8ezYMR z<|(zai@Qzp&_jVI>b^CBdd~&moIinFyqZ%}kt}=xqyUYf@G*5k#2k=@BK@z*_dB4s zA$x#wXh7%}_hK7)ybaS&-xsuWOch(pu#Lv=yF@!sG5kk3v}iNCl^cy>_Jlk0(yJvL z|IlOwGTJ5)^4`!@)h^H9u7fxCeH*&q(@h*xoQW~D28p-n*&c1|+}c?rx3BhpocP}> z#A{O`niHd*?=FVi=NOP?Q)vohxEXGV^g6*P2Ox9tx=)c9x&KGu7C&-DmRSSBS3%n2 
z;zq^T@TSkoNWN=h(B7w@`2j*Ssbsead?D8z+p1OD?@^?22zPPF?6-_1KU~NJh4@G3 zfp!+Z!ti)NqDnv-w(ZMTq~C@Z6BhYd!Rh&2|JfM+I29;6)R}^KG{T~xW8Qv#pOTE3 zwui6+lWu)5hY6Grg9^`!?~r{bIwYyX$H7@#M;}fq*2i&80dQoZJWW%e&?dl33l)aW zyE^|GKEP?VLk`p5#qBl|l~~nou)@96y0#rv&{y z54O!cvapReZVoz)NRPK+G#8e{GL9W385p`&ZXa9c%0_=+`GR%PA?wlf@APM)S2(OD zub%6*?DG|2}+(HRs@)&?5Kh0w^C&S)E$z|ZuJ{r2mzsgQw8FPN9B?>K)} z6_Vf^_R83R@&htuxgUf!r8i}4j**AIjy`Z>yUZJRhiJJLE2#4Pzl8o8eGQcXzRE~TG1NQh*p3k0sY+~Cn{0PJjP|FxM*VY(DJ z$Tqm>4|Gy?N&j>*NAC}BYv7C-E~rL8mrSBg(+>7$P@trq^eHty4V(>5zSPrvgPEKh zQ`kwX(A3c6o0SIny7oNAPb1p3r|~Y+p?p87H<}i$NAu;H7+TtZ=Qix+S6B8S^XjaA z65CIqh47pL11$K?wVgT1Gnf$lW>K*JcX)=8?fU~ zi7c9BS4pRtUyAyECHv-TJVbxJ zb|u3impj;1-Jpx$uYWZV0O~!=PggVR){{u5WPSU>TESY(k;J>-G;F(^0^q6Zcq5tM zaBPYb+Zkty|2UTJvtLujOi@?=OA2}jG-Sa;KwUZV0QKJ`$H~*Kl-SLOV-xUOaQaQduOEIw3vAT^g$48I0z{HcQ&;U zC;x($=n71={$9(0gm;cM#IiojJ>~pGyrJ_kWjM`!^3C?@)lVh>h;nuj-)(J9o}<^>+U9=GkubvO`cJ#}s#wR9elzN7m;3C(cZZC--WUTB6h%heG zX`61F^VywQ)x1(Au#_HoBd(RT%+7cj*>`1AfhGZ(0CWoq^b5#Nl&r?t;=I@v;RA7z z?!f%(aohS5jWRHs)d6eY6tnN_5{Vs6&Dft0Uv)ic_n{lV+!1UEsEBm&nG=$(guP)6 z6%w>-;eM`K^++W?2N=eq8?BT%>j(z+DBE6vQJ=geGoT8z^;3%|)=X2U$B&>BWQ*J= z)*Cw0QKN+2iU%+zr99~PKRLP3n2U? 
z>w{P2-2-3sdQ3-f@}!Unu}p=|u+6g(c1ax@PHDCJd;ca#m#M8nKZPY2oA03ayZK2Z zX7?!B-7VtoUZF12BM&+(`^bSDi;#>)kehH;qg6sw{D39?L!SRyHp@NIqt)ckteeJn z_UNlv8Nf;j!5`6NjBDPnb)j1dvQchEnX!*twJVety4R`=y|_ovW05_ z0Ig*nqWtDtrXfep4JJ@P{vt~Ov$W+MO|MxyKf*V1;FLNb|&x zW3tPSGW%HTZ9X~OCF;p4rNoB?6G7W@1%`!j%b^z;YKLB{htR+xE z>#JnfvQRB70G7Tl{r=zkBTFn4bXK6rL6!JrBkoNJvCA_i>rP=**nd|J4tBywT7x+BaWZum`8>lhte1Bs8&0YmQ1 z7y@K20MC3XKTxX1qj4wl$RuY(AZC%pBJ&ulF4G9%lm(0*ip{v#&ANVPSyWLEpfzrs z1t6|sI*FIXP$ESA%E!YiJ2hb`4T+xtT{Ct)N4d)>F)`;HmC2NMS;!FP?p zBN_<>4!6bu79*AW7)q@vMx*PXt~{p3Tf@Z5fvJGv3ssH_hpog~1&1?h9rkOg0BaTW z^b^0QQBPNLGCw*sCQ6!9fW%tS&t*|9@%u4Y4&j%v3FKerO8ZDM-h5+F2iN7eH1#Wc zRQYo)hp04Pe((_b?j(i4cSmIZ#`lacRH*T)kxWM`r)^Ewc-Y{3Sd#6UvYV}bPleF* zo)YVJcqkKYQRu$)&nQ)GN~S-|HXN@wNaldx@+VBz_JG7?y5&DL*#~aL7WMh)GQHfh zvoE*yB`KNKM+$bsIM^53w2%?(>`}*PL?5!4eG8+28dP5FlD?8DJKwwTm7rHCa+JoM z4X#in`{(@Pd_=W(_k4=Avq=P&$VPG#d-%?ut2B3(kAIAd?*K1YdHwuP z;Z)4+YdtPKR^swgbXPGX^wgR!JKxpzP4)br7eG&hCZCpxGc)_3pGSVT0JylCv$}YL z*>qQL)z8{MV-Zjc9(%^GtH|b)_4gV7bJB$Zg|0&w9(ar0+kU@amzzYM+p-kAEz6UX zUn9Huj6_;^(6Maf)1j$dOgaZ)$9)c{x%cpE?$hm^DKTW~kbLKu&d2hMbdqMfn1$3m zSPjf(6bR@psw{0^6UI@H6R08NksqS=s7^f|+HbM<$&(nR=2>xl1s%r+m0Pg57M_R{ z7^;BT18p4l427cUX?oC`*ui$z?HbW>jA|h-6!HVLTR2)ce~4p7St$bG(fV#(MbS=U zWRa5k>X!56Vb&al*SE|!*m?M@9V zdED}r+n<5M;V*FY?^Hw*1851;GR$aYXA%OVq-s%E;z3KdB6SN&3HHkaH z#V=wMo)ZhXu(eqK{#kHw)+_N4#GG&_yQwnbiO3BJm9Uq6eptzf0Sly?Fn4#)$tPlr z@R%S{Aqr{H5_Oy;OREu3!6Vm7x1}Gx!b3D&r!7Q7-;*8y`0|`{S{?1;)Vx8OI;r&+s_bLoMja zZE)~O-IZ0Wp!K=Y#h$H6tF!<*q~KQ}-f<2_b9F|fDy1f~fgd2FH{8HK4R?f>@C~r*PwJTfzToV3= zZDF;K+*2n*BLyc{3}s4*J-(K_U{8)CpyPjb^2g!S@xXq8RM8F8QE!967(B4?Ouw7* zb^%#N@=9#?O`7<48JOC!K2z0e-LCrv z0cBj^Sh^cM4-=Xj{X{oM*^_Yuow8H|E(k_AlX?QfD!y&mk*-^ceb*ZR7!Z)me|oYZ zpCR1#nCaW8nkg{3{XHnS@G4bTiWe$a_lKKVMOg(x{^hry(zijBAg`zzS#+GK59I{F zB?+qay3OiJ9L%d3SlHVZYI-fy`--K3Ly49lMwO@`ed3+>6vi-hJd@*rRZ)9=wW|U5dHBtJBu)iX#`xUQkxLqfjak)(k{TKbc ze@|>~M})x(1XxYOpFvu6JyxVK8%;`xlKpYjkzC4zjP9 zdT+qPZ`kyq_VL_|gpLxaNjK4jC&gY6EXGdWtZ_60Vd)I4$^|2HU(GJMQvMtRr`!+W 
zcoT!Z%Ntg^j=}$$c#kA--p!(JH1gN@*(!D8r_#miRP~)}n)iTgQg6BOVfrj9EL5kh zvjhLdF&gFvOGMuK`AkI)K_Bkq*}@8LWz5K%$UFnAdW1fA0yd7=4J!Q@Oy8FMtKasO^uM zG?XAQ_{&}5N2im>@4Ojy{P#MS6ywAFm%;pHyQd@k39xEEQ|e^OXj}s6H+zd_@0!Dk|_5@d7BE{Jz<04a&AyK0-W^zdIb`_Mt#ulVKuiPC<&%2$)Zm z8)Y%EP6!i|Ovte58n;;7b@MDtM8AVwV%y=QW}JyS)}VR9Ep?`oGioMZUG=u2K}sZB z7I>`xI+)v1-hF(7`YzBWL&vx&t0g{y|D}Ks6=i{N8i$dyZA<$op_7`;eYiwMc;q$p zDzql|iw_0Kbck8yFk!^VN6@ZC$(@$DUV6LGew8Lc_70tcyI`*>yi}e1pWNA}iyv?7 z9Pd2)pdXSz%2Ir8&HQDL&RL>nq7!5C9 zA>B)0K@3liT@?;Bi1|=rl@OUdXcc>AxK(pO*P{}6B9$_tg%)gliz1Uz3s|ZE~ zx_XJ<#gJQ&Pagly8#2pkTg@9hn0(z@s#GD$6Z|E2 zfFEFS%0woe2~5Po#N+GZYa?n|)fhTC*rfk+)8rFmQP2CFZi~C_m=_SS%ZiA12mEIB zi)_5X=9_h7<3H4bv=lViB||J}-i})A<7Sy|2bQ{&38KMT;sKQ#=KkY`ZHL7RFGH{2 z(&yZyoumf`1o%^(4vr@0ZpPL$qJ6SVR2joUz~-%m?qEQJ4*HTS&+Ah)+F_HTDnfm$ z>@I*Eth@L1arUO5BM;N=djsO!;d$V$6p_fM9sVzuw^90FgOaQ31y`myPGEv9Ki(#u z3&;FYIu4j6A|iMjf`pA`y}sUUbR@W2+5Xu3#&}9RCTKjJ}Zkv9|)3R(l5Ktl*M>e#l9@y7} z@1Nx=hHnj?|E&oy34VHqKq!DtKB%GD2(^z+QP?_2CzGN2^Zb_au<75FM>-jaO?l8a zz&ZmI3AN^DI8zxKf3!0)GoDVk)7LHAN_e{SlVEVX8VwMo>wb5OWF&CPwA&C3$g8L! 
zE|lsv8jNm7>(bct2`yop(!wuIpoyt@biItW>fZToD)@B=yf^>z(?@NgjiottZnwB0 zd7V<;LXAK_>e7TSIxarVb`1NMBqe?!dOviZip14Cl$alQnMiJkV#1b=tPi;DUj9{8 zwytwJaybQU=WK1jX5Ev+&b=iU>yhIDQNC+EW@RxiBwSq}pXk-={4+P_nl!z}5;%dz z!iA3%w^3C`>_)Q$^r48~3C7DHi@?6QdFo(c8sUmkTMDqQyOY^^8%>Kha(UvWb#SNW zb1`Di9Vh{yEDemxx2?%;R7bxW{wLUeVF&#V_|9(CTD$jl(n-K)Ro#e!7DI{76d^-a zD?x1)uYGTF9u{P08(yldeg1i!57~iFm^u+0L5D)aoF*!2gRZ?Fo6SojH>UOmX-q$pzvxo>-`&W?DYr*Q0x+ z%Cia)6?$1Dq7>XxI?!MVWLghDlk-U_hx|F>NRq{6Bmx2aPug1!#vsV`*_#C71vSm4 z17CR^PSlLS%n9<=fQg2Q4e{|)o9i}>(ZBn&1zT4-PXA=9dXHzzomaXc6GGGyXY84!M=_c)u%L}^k z&FDB}GL=~-0@GkN0-&3fSxv~VX}O_8EaL6x^>IM5F_C7W!^}hedlz#DgYbQ(`*EB` z$gytIFHp=*ej+#ca9Vx@70(l&s5MLpvn2dA02IOvOF}Rc`R2qC>-Q6upnQJrd#|Tw zMm0xI>g%1R|6ac+aB2*G)~Y%Z@we$1yqrT?g8VKEl9|t{nF>u{F^J?CRCp~*=!v_T zRakmSMJGB53Y%sM~Mw)NoZgGaQ$tReJ*SS35ilz1CXGSn$CbSJDtg)u%PqOx^j=Sfe)i;M1y}AKe zFH>6?LhEkzdwt5N4fwsuA82X5W1pu&;E{icM8AxoUqcSJn)#&b!r5j}4s2Jr=ylDf!(x&!XZD@3cbc7S}KUc+` z8jMqi&T=c@RKx|d7y*{$Gc{okP0#NsDW;r0zAvopQU9)=nBb|1Sc?%>%9=!f&^XSG z{?BO#Ea~95FTGR!fH6Xt`vDAv#IdC=k)!$D0y0xT={|!RU6f9+<1i_9C&VhLyZ0XM zTrqdka14nP6K009!Gv^wGW&c&@yJ{I=Jq3u?g6XE=&f>!IE6&2#pa=@{-YO>H!}o5 z8hO|RLUGD_j>-8L1hXjd16SHmG$m`rBuGtcE>XJ` z={`JmS#BGciV378d62%v6|&dtvC79N_47KTuno3AAnKA%0m!dtYuZ-V^LzQgH8k*@ z8AS6rZUe~aoW$~1TJxfPEjAigF!`ja&g-{)Y>u10Cc7LjD*KMZbAL+ZPJ~zo**Vzd z;eC(eKbJu0Bd85Xc-p6b%S{m3VjC0ucih+|yX(!5-;)o0*$~KuCBDwYhtcHBvf+Ff z2JH;qr8oyW!(pbUKBm3Fd!YyU(%vNwBOC+P&gSZfCsj0DIegP?o0FyM&1;Z$_=Tgr zLF&ngmsy?Hd!m;-QlvD?g+fnLDX$Kw(a6{hq}Pyde+%rSf%oQ;Jg_7J4egF#Wc9?f z{!mzjD}VqwvNhBC&C>sm9W7%);EvSM_p6Z+naHMvfNxJGz|B}6@ZfCd`|sC|(1jiV zo>^|if{!8|Z(5H#Nmxo#EItNbM|h$|)ymXTJF657>_^JH63-|=IU7QT5j z!qcN9&w}+&qK+-FJ}qGD_mM~RN*a=9$u!t2fnnl3N}U4!>YwoiZ4gfu&juWHGfo^V zPTmoGB5c%M=3FxbRgY|`W8HW_fnNpi0K10PvJJp*CBLhEk}<^dzaMoL5Icl@|2S;? 
z(8&jMp(nd7rxnmNj^!(-&pJ&92?-OsB>w@D%4rLV zkMtwxDW>mI5Dx#s%XkCQCBB83oJ7A8M5gT%KFZ(1H{lAoE?MD&C2E0eXX64BbBVi; ztBhE@b{dV|c|BfGVWqCs8m6)e?N>^>D`xXuWFDZ@fME3U9vup!vJEKiz2lrlm}=v7 z=Q|sY?3CB><98!$S-XUDuKB%=P!S${MjWG+)3}IXbC|bt&jNjYv z`0gO;k7l<8EG!5-5-u&d&IeuupL%SYCJoUwvn7?@LJ<;cOcm>CBJk1uyKP7L!EKJ| za(`CRb_8x*75IsUG6xt7|JC%EaiQ^l_TW^oTf3KF>jC%Otv%?H2>%|v*uRmZv}a56 z1+sP|GE4jE@SBMBeC%@Hw<2QJzA178ELI+s2Sr0`s&0E8QW5*vcCr@u5uR2t4*YZE zdlI1QV31djy9}$qO-HZ!*i#VavgY??4YKNzN!EL26RP=q&zUIs+JxFAMZDQZ!?v`a$`Ha;F;^!6dp%us}t*FD?@hicb*Vwz>2e`qS z+CrVjiFO=}nqWo5kqQT2jB1Gd8huFf8K@c_IV&tZ!3Hsv`iK+7)DK3n2^@d`@4d1s zC3R4PnXCf6pH>d3iy_M(kN2U?j{-uoT*-uqCOt!blib53|uvE9}YC1X*$LFl(^AQ$DfgCR|3IRZULLtJtvOK|j>qtHxdHk8#{!aOV#3g&UOt z4p(#FO-hEywj!ZQdGM=mLw4CArB3>P6F{J@LL<4xuD>rUKC#DzmyH4M4F;IwyXFHn z%RhKx=+@ZcHoX9Q0g`1sC8+H5<#=ba&nbh$`}z_$0pQ~M9qN~|Iw6v5ovvs`?6CA5 z4i~5kSYg7OEylQD`NF0WFfNU-IAF&3bvIQ1t+v^bfi4CZw^q$pz21_-S!uqNP1ii> z{<+%rCn{2^)15cIR$|sIyjKW9aUT`p*6d;B^PKp^cdgbdEDkrS?G9I)ynQwn&ew1# z-oZw+1K7!P{D%}mS4>Z*{`H&L@BKP3m<{zW0#%eTG>ZTkV&nh;Y=i)QIjfMl>o)NT!dis zeD_c!@cK!L11oA9vRnHIRANxKT*k1%EHXZ2&B!#QTHNqp>cpukD+b0Y522hM$Gi>m z=0N=c&h6wiJAj{q`)6?)P43jxp%c-ckTqcMljydbv(P^2@#(_FPR_lvhdH{MpO+lN zfX>k^A|oy^}fTVPno9qirz+QRAp_<*wQ=Qtm!9$wJ`pF zCD1U`xz)5#{BT4L1)t9O9)21S3>*Zs0&#lbKBbOo_+&EVOg}l?-poia<-Kw3M?)|= zMvz^2zkA9*7$73%O8CSRDXH0T176!Rh6xD-7JiHyn-9v`GLSYT^Vo2*G*`Me*QefrR@J!cYlL0y? 
zs4`yR*aMeZ^Z373r4t9b(A5OniteL=gn4kl(8=3FyH$)Lol4xVwL8wOg)B#f#?R<)tR?IE6v9-(LC zTmAkz$!DbU+#&yQaKMpnMO3NSzoYgNRKB;Qxl#E4?B@v%Z2@eIkxm*dx31Q;7f!R0 zFD?d@1WcJ3Qtu1>`QyB*Rj+S%pUDh)Ebct@*`;NJd!$$^NF6r}9X5b9q;vrktvP&r z?d4y>hQQEh4@RZMz3q1Q`Goo8XFY!+ocCDyz$@K*E^E6)FTOU_-jPQxp&ii+a88d< z0z5M6lT@KhtO_m8vyK49_FK#SqI?_AR^Tr%ynnpQXzkXkL%kttJx*h_+^aZ-3=6Sf9x8 z%(z5tWxj;b?>>p56SY3ddjVFD4nrh~X?NnFJi&+ZgrXzU0;A)pS(m1Zp~3t_0w*PL zWP_zGf=iP$4vv$#nc--&0K>X+EK!6l1OCV;*3O)_vgM~1EfWzb-D1WHRR(o)rC}gE zL}AJqtXlE zN0?8C=tF`Du>H}4!_qU#^Ic__9TN#RF4?MsuG!6|C^2$$)!5vc*21swj_fLX>SL_b z2_83==@&<@%x(*67o1h{Yk^!oSgI{gK3mji%OOH|@g@5UPBx7=Es- z=-jfBs=f7{L2T=}%baXQrb;okjkwCK2n^+Kw6-(;pBLb7Oy09||Hn2Q2StzrbX=Nd3%kMfu2kzMmBJ%XtEHBk z97IBxlE5cIIHLrKE)Z5WP1>1DqYX3VhD7H7Wm%NGl{(=O@4YuCzX8C@e#3*pDveGm zqIwKSQ7)kG^eAa@oe*fS*KB>gw>=~=Uj^Ru@l)xE*0iidFtqzEiQ>c`wUImtE@4p1 zyae0;DnTN2HM|K!THttm(hL-eU`4HX!CPvtzh%;w)8j6`5)ozwunhB~tFZpi?+>qnuaGi5`a3MiQ878*51lF?n+gSp2zJTM@%x;E#tn`A>+a6F zqEz1lUyBI6~LBBcFY=7SD~ zc5VuRCE&Neq&!`l)@m6BU;q=4IeVq=Jq|2fi>$D!_X;-hYX;7UNU|sZSD3Q!om<5) z%J}|RvE<|4%JN0u`VW-^o>fcmtkz_@z8e`6AzZV%@t_EI;Gc^?Lm@Jmj5?E$#99Qy>7;tk|0cO_$}hmGQl~N z@aYi@BLIisTiF6?`?+q@LVjGVB@cBd6Ic7YZLo#W8F1@Zcygpf7arsGQ!!Z$<=737 zB6^lMp-np=_mAZURwB$m0@*!o8kI+mKxCKjPwfc;bRoLy2^Wokwq6bHQb(wj#yqum zNVJebctT(6hmc-b$}arI-uFeO^%WL#*L8NR;G6BFMnv^@5+VG6@5<5E^m6v~n+^Au zPZaCAr=~pUWozfb6Te4<*JcJl`fr-YuNXU?kAZ7rbGkH>2d*${Y;?TRY}cn3OQ{&; zLC`a`g4dwWp_Ys}10ClPGSSqFdqM6c&D$$@;_13~{(3=1b612=G#|j>kG#%8!7I~M zgijKSdj2Z#XFvRSIv;rWqxe3AK}yX2tA<}~K!8z^^Su->Mp4ZL+8(V+d9 zU1}RCP367f`vYVH7DP*Mx(Sr39a|R>)Vh0;sO@j{^?<&hR=ig7se{T7EC3Gr5=Ynh zhWA?PrIhj}@JO)~8JJwB$f%Ofj3rR|9#)he@Cgd=Up)N_{^O6Ac&b>|PrF}Tb5d_K zIzFYG3+TT=w;qpb`zRf(u@IT$`1bo!6sz{%1-z1+UxVp^ zzrLqrAfk3Xfa%QK&GvG^9x>m0)k~#4kU@vKR|gEHmII@dkR{MOuuFgGJ~rzA?ubLE zItz9d^3DM<-yP69?Y%G0aiw%rSTrBTiiJ1d z2;(s6hrdX#l_vk}9s7nmglicD5S&3c?r=VEcRGTYdUQrxguBh4$ZOYSd#27%r}9l3 z_4&7kyk?(i&#tIxaPi9p5l&QI>(S3Iir`bsWp#oEOA0tjt2W5-{0)!}&GoLjpP;Av 
zd~+s2U2`(4$yE!Z?4?c;Zo#6L``;PxcA%FbsY;oAhobRY(Zt`v31C8BfkG`(1A!ln zmpdR1en2W6wrC1~&@(N6Q~>vm#9?`sG zaQ^tEbDI#4VJRgrlrtV#-^;z!0=y^~f%@gtDF+*@iXm8Kj>GP_>tzfXcz!Sa0^c^9{^63D`pY z#Vru5a8^U;Hn|&;@B%_30>OT@-=->r&5VmMt`743r3o+TtL8p1A?3Nq!?LGICSu)W z0qxa17q-kVkjeSnZwu2K6hHnq`Rpj%G>eHKivz0A+Tf=^4-AaAyn3((GIafbkzq~C zNWcD?<#pyd4N9L9OQ}I0H$|!KYSn)FXp`d3`J;~)hdGtk``T`GIA0uz(IkR!e$oh` zk_OqloZp@mEmK@3ueIt`+6-ZJ4^lG6NbnsHfJb$3Rd;Er7avv{_X`7UtXk0|+i5#> z0qMk)d%V}2I5TAsIRZ(BG$b|%bInI^8UJ@9Ajsx$aWeU_3^cIQ4=(2!5m13BMkf-6 z+W7gSw6ZCV`6DW{=M}a41_U)wM-RfNMPx!XM4+uO$ zE?Fv~pNQHIf0!>S$fpSd9{$0u2$rUecRA)&agNqcqaVeFprc7T*;oW%i6Hq~1tN}3 zFrx;yr4QVD^wolC-u?hc<5FA&vJ9KvCmTMFNh~|k75keba_2kXAdx}!&VW@Ja&4Zx z=mZF^rv=gx!0~*$KJiDU>Tihq=?7K0jU<{3N~u|7jN%P}eGs%m%%+Hn*3{<68+44m zj)ti@m(nzS%Aqp5zP-bg|C&t#-~b2jut)g4(zNqfAaS%R&tDvJm=r1GjALemJz!U*qEh3*3r_u)s7t^*Vg zKx1kRa_K9qmIwv<*0&tFZWXg_LfO$4TEEz;7UHHU&8A_Luzh z9M}%83inh!Lcy^FoXA=>UU*-A{}q`!EgBC!H1&=$vH^EB!f6OPVWVbx|@oS z=B|C38<}CGD(f(Tn ztIr+6VCxak3D-x@(d%?A8gxED5(q@-{7=a~h9Hm6-qA~QxKM*a=Aa=MlwwUJNs7+4 zqbfS^G671am{oR(8B*)J#}dFF)y;i3-&vbR@Nh6-mghMYmsZ1De)sQLZ?r;mk1LEW z<8&_3%Fq-v+H1g?<{EaxKvNDsLQoOSZT1@s$a6EwUj)7ZNp2VV z2;)$Kjj?8lF&vh?y~Q8wmu6vD0F7BmYyJq(|}A-H;)To@Z-g$j0cfb>tM5 z=Y71*%I5IDq4@uZ2P}JKIzn#5*0=}~SaU{U)uvim8~!;#o0w zVpLfUgNz1@-R6umtbz4A7EeGP(Q}A_MLXnf>mNXEc8 zkp?_mz&-2wHUL{YbdkuN(LUxH<9-}pp7dU&`vhF`^l!;dD?V%I9@uK?l z2A}Hl0>cQ%oPSw=K}X)FGlO$$WeRKyEA+dSvg7ybEc%`f)jhpP|9iYiQD=K9YgYXv z(U9-K9N+bljEOBR!cw#DtPAxRqL@2V$=!D|IQ0sX5o^Fqg~4%YA(xaQ&0FG3+3Y!l zY2n)Y&Q~LaVI)on#?6URhweClcQ$q-$z06(ogehQFnSAI10E@vWc`bHp~$#y=W7;w zHS>;nicr_+Q2@$v^B>_**(ItR9UyztCbo+5RaKWisEkaZlEH8wquO@h?Ro$90K25f z&2F*VK18{5NS|Mr9%~7sFqF&DgWHu!4OFxHU07)oYBCHZ$KeJL@3!87_kurJPopj( zQX>iAoDG`1foBrOuO+Wcvub77>!LM?0O`Df%ragf5gJKwJ!d0$} zj@VGO<*aHASB8wNP7&-7-i-IY0-c^yw2+GK^{{FnhLEE<*S6%{;(0WGcTHE6$-#2c zy`0;t`q#au%{l*7e_&Yr1i~+Y6u>(ZXJO)CQ1qnBa?_r2!v*e&Ym1AzeR)`8U(W3P zK+_4FL;Cg-4oe}m$ADntuTN<{t^_St1e-nQzzU1C{`v8F|2dHI!Nqnti)@9mUJmwl@Kp0e3DE%!ooWnf{&ei32n~o6 
zO+Fr696h_3gn(ZKjBpvzYdvSesGf$!tTM(LlcFNge{DerI9L7lw#ZM?8r(6T*<3WX z1bEy}+%`M8-T+VlQSj4Xp?%^-$1WVL)2)c!#S`^T5_2VJv<0OglQ*%uKb#-q(TqW0 zf%0#4(;(R_OMKk~0oNbBSNxi8oljH`J=vF~X&LRGtAq-lzOO2u?EIYss7(-h{d8FGVzANlpQ zZZ!wF4Eqo+MKbC6-V1|vP_f&Cou2!9qSqp%WaTyTm7Kf4;D7yrp-2$3I3AqwFXCdQQ8qBR2eJMi~8XooD%Km)|CVd~fFt`e3YJG$ab|0ty+77G8A z?=5w2?R6zqb^6CO4S4%xvO2O!+O)EC4p}9{Ym4+7XndgRUb!*20sGrAdja_25~;m< zluUw+D^y7XbAY+{;jZnBibuN;x$1al%JG5o%x9Jvjj0G4uV`efr)IDm!!#$Rt_xrtW@%6!t7$I?_`^q|@*H+kb!m(j~fS z9qj(=i+b;pqqOQPs`k4;0ua4(J~jOPn{OpuK?6Fq`~|W=C;5R2t6V6nWa{=AXb|sG zmtnkQf!k=5Mp&nU+M=Q7Q{-S8kh6qs0=s);TbH63x4`ZIiLzdI0|Pi-&-k;8(*+me zQ_vgiTpwM5#!S6uD^g6VUzA#TKwv+azC-x$Z>=>!r=fulm`?TQRX+*&%?f2B%Y!=j zq-<9EqU1WwD8cTtf*RsDC&a=M?^9p*qI|G;i6@px#8zWQjVO#G#|GvdFQ;f_YfxqR z_XHy2?uPMRO+-rAypQ>g?q+odfhA|K;zQQuYeT{EQ2l(hL{Gf%nkoj9Puv)$m(Z?# zhc{Pk_RD4jXbf&b+&}V#OI!GP5QdPuTV997BFYd4hd>> z$%C|%!-IQw-gdLgPSvpuu1TG_)q`ysb!Bld!xE!ffMaOlSFgUtJt33}C@a4=zV`Mx z4zDC08dqA*)fkE@vbXzH8EI$Kl`@;~RNd)-A8mm5tVxVEa+hNHZVKSR>1>RiU#Qgh ztu%wJ1;$f=vnVG{y};aqPvZh@C~y#c+ZsFc{P!sz{cyYJF(45i_&EaY^{!_*DkVl? z4k#U)pW9l^q)H$td|&J$9fIXxt$&>+k&s+)rE_^Hn#Ef6fTpG;daWilGO@0U;_t2~ zcYY*UAbC%KxMlca9~1g6zBww}cztb+s*`nuGu0p^BDPU5G64eyn#XD)8RKj|2v6z! 
z{xN#~UZH|z`Y80LcOv_I^huQcAkS!9*?L&goERNu$CI6*`Lq-Bv6G3)e(+ZR-jHZ& z?Uj*UA)dZR&%Ou@hug8{U;99^pArC=?me4~V%ojF3xOl^^fD7r)O%jJu)W~3>R7fs zc%HvBEA0NGRK-JDlCZ9OAZ=W3X+_f~9v0+E#8i-u087AkwgSJZMV<@|c3N}(tRS6S z$uD4${10j&ngF(WZKIhUY`tO;?=sII)Wn^!8y(iJ-H;xm>)hOI#*nnWie2zfEOCR^qpZ^3dy zTa^GGkLEm$`(9nSS)E_c=%Q(F=SMK^Hmb3|3#I}GK?dro1gR+sMg+aTWE0Q2R!`7f zKh0J<$IJHM-P>=fnYa6732}+!DRCT}3B=2CIhqkyJH#?{MjCHQvUjFj^O)G0_St!< z_*mG6;>wi~;rxDdz@TpppQK2%?O9jNk2ERI*?`mo9A0~_^y>=j;4-mlhwMeX=r9CG zcq>*3@xz<%-Ef+E8G5Ao{1ezXmT;NGq=ik1jUSBLWtTQ#Ezw{y^?vdJEjU4JS%JMJ zIxr;$MlUiAw~bEeRL+m-u3FDO*c$7nyyFPW-scJqJSoVC26G^qCxx{~DGcax3KT-b zEn^r04@?Z{lC)LUn6^4`R0FM@a?}LhK+w7=om^%SomlM=ual|zik<#C(|g1MGYu0` zdNDR?uP;g+Pz{64ZN9W0Az~xDnKP~f6OJ36yz^r6M%sQT(xjpEJz9Y^B=TCuku#c2 zjYyj|zYDp>dK(v2$*J7rkec4Ud)T~v?q2lw=eH5@r+*p6pU!O(&5deKoFXcc9EPgT|DL>kBE%-i&Hw`F z?mob9v{tg%SL3_?N_D3$uG?!%&E1|&bY#`!5VSW9SuYGb;3IspWt2?xmofP&ZnA*) zyE%q=eP>rx-)~rihwH28K2P>?^#Aq5Eo}{G_4ll|J#?~!Iv@whhms|H$j@}DCeNbM zzjEt3*Lg`CP;CC;QM!VYatO13Rk@K;!@0F}`1(@Sq6TewUt0!`Te1ju$^)KA!IZ6o zqO)=cRM<6-aT>54>;PlQn$0p8?vC!z6R>e>z!G(;(-&E@1LQWJ^v5qYRF<;+NXtbJ zQIrwEYhN2tF2EU0WZ@K6*V#lL+G`#ZVeR*8Gx zSQv{Av!?^iJ zqO?og_p)t3?^p*g*{n~fx2!6=PcDVVH$E{UR5$+u*6uub=EBavE|l-qN$fK23&ujn zYbZ4>XW9OaaMvk*78|!+`-7;0*AIju7U>`xL38xz;swT+#>&WWj`1O>u$Q(L?|XCv z-V}`8>T_&R>eZ=}>2j;rHaual;^(g-B12xW7LFz;^2@QNcAZ^(Bs_iUAVBS#^{wXc zAlJcaZQk?H|41ru_jiSFxnX?;DU+mLUIpN&h;$0SxH5EN1Ea%t4-J7QS~IATGL1=c z8iDmxZ_mA?{_s*>wvKvp!~4GMZ6!;g71+Z)z9c1gS6%}>`(PpGtlQG3&c9vMQKNgy zjlS0~%e_gHWOD!&_28z~GRx`Ft{}KK(zVS#pub-^_1hU}qc$a|)|PD2bDHPSmle8` zXH;MDoY&G^Q=`N(kpEHCq06Ye%mGi&I)DH_f5534=|raowEhzFZ?&afzyz$}CAc-| zX|Y9yalsH}RdFV~7zGI*ivZ~l5QCNg+5_**h9OX5+PIuMVIROi4M z%kbqd<-t{Q9G7F#+1}IT za(?jYHalaL6jJb^-%?})57T`c4`?f}2$b#>dTIZx<>8(8030f>TxjZx3l_kfiV~3G zDYMEE#JiDxtqB$GvJX$bLKD>TvS~w0L$8}7Czj|&klv56_tfuLA`eAoc?g-J$eHxp?XzD67iFW43- z`vjK0CvhG;sCStoScj>KX{95wOTz3qlO1%sk5}4Rl^Yeh@DyJVDs6$>9xo8|6I6~m zpWfY5z>5eV6uYfk4k$DmxuIX;+!J3BhNZNa@%~{wl+@F3;Gsn{jtNyiri?1~z?Ul! 
z?YRT?b!>}b@rZ)2zV*m4x2KRhs~VJgaoBa;*UIHDD7jwg=20IKeGfDcIyoM%>t=tn3Po5{W9vKmcUmzi;n-QQ z#iYalBX#PxqVagN!QK2OQ-5GTcTTP)aOqu zp}Ns$9*H+r=I>eVybVb>T?_#UOLc$@6MRQVo^ACpoqe&${bGyhcM0Zz8~*tQ>Ad!H zDoX!iVz$}h!-Sg4sat-j!xuoOFM+8Chj~!HWp?x%8>B1T;QHn8#9BTAsnUptr_Dfe zP9aVi@Qc>)#sOzEzs{9EeMluv&&Bpd*oTzfxX-uIx;^Z^`Lg=E7YEkq2tzG?Ifo2t z(pm0jr{!gT4(<)TC%i&gSnLndF%^UzO5>v?F2l7#ogdGBYI8ANHhdgalH_Bl%sK{( zh;vifM;7XF-OQ>-%dnT){GS(KUVq2D@vY5L+8eid8xNbMPx`#BUv#b~QTp%_O&iFA z(5bw-xF?r?(EXAWOOEG>v4cI~{uxr)QbC8gQqo5|3-$NbQA`S3f88q@o@?n&qtnis zHY+o2)bn~JzhH>6Ub}}?&C@Ss(Vx&?1cT11I;i_96db)x)Z`@KDj30mPdTCC6czIC z$L>&WhxIO6XD?;zWxl)=>Sl|=+GFV@>gRlN81^BQ_Utt|Yfo%PaI;oeM+n2^8{WeJ z*Y^B}mosH9UnH6N%kjgoFGYI!T~92DBR6JhOH(Iq?G*s_%fG39nP%B!6z z4#UiJUb0Ea@fFa*JnJFFfwQn!hSXS`&^+!2uf?#`k@@5C>bJ>Q`6;s1eemwTj5%QY z#9G-`t0kF1a)0=T7_ERgFXReqTLwbKsGdX#?}3Xl{=5H2)muhI*?#Ze(nAj=HNe2o zC?Va=45%~`(kUQFD&5^(f|8O_iqa|FD5Zq7ARPiC_&>+{_kGrSUf#aA*BY+t+UGv^ zK90|Uw6vVyHK>)Fsea6HzTp2cfXOPOsfQ~2_#hTBwP1|jgx8fZEl|u)qvnXxpAmXR zNF<^yjlBwyw?XamXFeumgZdW+7RNP$n74WYtIpSUcbO>zLEVuEAmN!w0httoU^`Y5 zVb@y4hJj_pG@^xmu#|&rP3{}X#!ci2t1s^#v#w|O;a{$g)KjD8L#Q^YD8^FBFF%hX zP$UY!6vT8<0&8DFpzfyObP0b@qQ5r@*c`^@idlmX6?*-FxU%wHOdrf0aD}&xz5)=a zF3#6R(vO=ydg0(drq23>GewWO3Hc}!PL0&{#p=z>-(pJg1?(1~L)`SlR@cGtH}x#h zWEskjy-JLMfb|F!a-Q%L%vPE3_+OnKh4+yYgd|66MJuqIDg=iT^WeT8Uj7VXdv!0r zs-t%DNm{zxx2s-^9rEk9#kqvRs_?#bBjk9K5h*|T(-f@v?Li5 zjf8dmKb-W5V&ai9rG-Y4VqdeDtNHVLcd(YSGP&C7}S<+jh?Y`;a6MEL-Z`dI=Vvkhddy zI!j`B9c0}`ys7k$!Vs!cXjQ)O?om<;E^Y8^4*w@jj^>E(L9mtiULDv|a-LUxJX7Z( zuJ61iS=d}TgOW{`%63mbbMY;R!)?V9^zUX)w;%p_Yuu#W6RkC004<)xi&IY{=&DY` z=L)xc`2rpCQ(^F;w*$V*PY1r+LrkZ*Nuc!fXoW}SvA`N|)Ur}5^*(oN-c%4S_0)7+ zs?1ay7ENp7myC@`;ya(dbou#% zL|`tj6X;7%#K$?rnsFmWwibO)lXz#1Q|*}&&atWnp{-zfHl?YoAB35U?;YmH#=6j5 zkw|TEW%`J0uKii;?p%i+$?!^5fN;+^le%g~yzk85k|Od~-qHt;Z#DJSSIHif$qr5K z1?$6aWBQF#?mJ|Z@d(HMeMa(Dt2N;Z=Og5aA)oPk6u>c7oW#C7h;wO`o0ZfNv;Bkp z&di|Bt;FwiCGs9f_F9gt>wx)Td>nPnSP5F~?gQLVRSltoi4WuzFadPCYsb<4e8eJ&%-2{c()S^xm1=~k+1Jvih 
zr@*r^@Mf^wRTBzyrVsIvy7C3O;+FY}ck|%2h5(@jLcU+Ko#q8F<*mnB)QF03V$@=8g)|kDQKv1eZA~Cka-; z=>(I9Xhp1a|4BIEoVY4prkNbhynEk9$-SNaS*sR0!d~Av;8GwA;Yh$uJVTlL$ob)l zX;=1T5GHSuzxvUdwvb%bs?>M!qc^Q`78<p?BDXi19v^X%*=f zpnA+(E;nDMyX?-iwQRDvd$~wgUjsM#w$y6g_$BZ7gR(E>OIx?PZ@bZ?vq$!jcDs(u z)w%79Z~gFr#XFRn%Zvx-I+`7uiszCWlgIrmY*h4)#N=xvR$gWnrzB`FSBu%be<{l9 z?l5)3P=D3QD}JY+o9taEsntiD1NQmXepSD{_x7jjZO?ahb7g)kztt{|c=Q2)3+i-A zj3&R_nH0hkhNtL<+d?KoRM6))=E-1>H;FvEPd!5o z*+!|~>-G_+wr5`Wlaaj>B4f^aBt7NX=!$E$&-8~2)u(~#4_hdURO;y%yc)V1Y3+W& zOt5&qEm+XU+saoYfVcy-;VR}}w?k(`i}8OEXL*X+uXMkmbf&!IGB=6|0?`0_nx zMh{tq%8yI%rWDXjAYoiWkiPRslJvfKI<8vkGW>Sb?cV%X`y_*Yn&BC{}naTKqoK9!S`vScZ5QDpZwi(z|bmlNKl|PRo2Tg zMYKE!f*krK6_IH7UX5X2oUQ$)QDPcrT6qsEQ&*R`w6@pmXs<2UdQmI|VPApFf3)aP zX*y{C5{3x3)P$#erm7d5Bkmp%PvSJ+(6)B@%S1%~#+r&GNN#{z+sRMFIhR|wTzhW@ zH%j3dGxpyk2S`0I&}dC(RB=J0lfuhX@!9OugFKWQP_mtQ?k7v?Sz=lwEt9(>%*p43fIxK%&#ej9xD;7bFr2Xrln3@4x?5-Sf(@(iC z52#XP*RRi~^CQ3Dmnht3Be%w>(9wMuNrI!FPJR^0n;?<&o<~&=J|fB))>Gj2$)L-t zYeEeCQA-@Axz&8+JuK!9$8ckxT9-`;`ZVIIP=(ENH8?s}#F&f5^>-b#m68O3J)Xl? 
zJi6ka;5t-uz~V1ApJ`Y`eg@p=g=(@jUrEzhm1wXnjr0T;VffqUfYQG07oAe)lFZ`v z%h0a;w|HA2M{B=vtarG0xCawLCGOC3nrg8Ns=SRl+kQ{y?}F0?(fFl*D7YCm=gt^Y zB(I=z8Y={arkd)pHF_mg|-H|-}+=Gpp9O}LBV3po5xh?5E(!c#oEh#+} z$(zCwiNUJmWqsbhwsKW+lKGkCp0L`NWV&7r^lPA2{>R4VYKEcSX;|c~!!o$2nl0&V zN^CxkLToO}!K7FExIi=ZWmgHSw#F1-oJlcvNz;D)Au8_WUJ(O&+tK+eplc{|9~XZ0 z5(Dd1W(*b24?$~!ez{keS8@N{jS=8({N2FjXKINXW8v!v>auW#t%b>se-WRKF@s>QEMsLrzvneppj-XV>5E`+FE>(?ovFb$cc}f zrG}93&`~V%QK%EAJ?6AxE>qMJig+kaAapL{Llv(J?rKCf>UuGLNxskanmwluj{1dP zBZ#}pfIP$PVJ5G3%cHvwv6*)JB<09>VuF%wgZ#3EAR;hU=u^(G_*@z_lYt}ycey26 zaP1928Ue9#AZP}QQOPW{&zVXXRVZ0mKEeA$SRLW%_002P1EmB0jS<{wE*>!g`LLmp zp#H2I^B^llA8Z3dYkZ5(ii})nD64e+QnkHBL!Hu|d?qpn1?2)dW1wTVkmv6}KgMT;W zPrT?u-G29wv+%1N#%i}5{~i2!7O8Yt!pu$Nt(XbGPT-RN>E?K(UsGQP5;H2IS4kSv;!i|d3#g7lyj8Gmw!Fzw0Nh|k@Jdk3DaWe z+D4=%D3Y+o`P+xL60F7;w0yi$6iY0^e~7CZ?NAyJncs{gp``WHdZhjvOM~{RYhOp| z&VGs}sl<=no_Z8kxhH*ELu7QETeZ-n`HO>TK-;+u5!3#i`Z@k=TmO*fF)OQDdbeKq z>jH-`GJQ^CLvlBq$S+|}?&z1*Q;z8T$l@LUD)cbm_s?&di)wGS!i}ZtG^yCA=rg3v z)HY=0)i^RzwQ+_x<%gEbjYk(E5II(TBiU2!CX&m>Ap}j}lCV<-#A}ke+YC;lkp6*0 zNK;DPM>u_{#_Ro`6Cmk{7la$t@7uZfgj-!lOVQ0>fvXfhc6TJYdUw-xCu=G`o1{R% z5^1U%*N6-46(dsRE7=US>-Vr19KyE24vk4^|(`{J$%N*-DC~1fikLbqX1;1 zdW}mgL~VD0-;Yu6u%!^}C-ut33X-$%DulkPM=-Nu0v;GL?Y5zoB$a{;@fxyf&fw60 zl~48}S4u;{wA`R9=7va*C#f_)jYpU3$@OM7r+#fA<$(VkGEx6E{p0Hh)q>`9q4Ad- z`lf^FNsq}Xj_O9$Vj==NLr2VC%+!KR1+J^;sOx-GqQwr zSq>_!ohJn9U_kZDWz!=ZQ7*M`1oGB@%ULc0$FIJJtYU#6m|{pU%=Mxt#CUK}akFq~ z*9aS81!J1-tVD$|r`-QUnSjK_w7ES5kCRh?89!l+;G)NH>84@zh_Jp9x3Rh`YrBd4TaBb&M?;*ARUAf32a)Pd*6K;7AA0;Yr-=c1 zt6(tcInAe=cuPU3>AKmvjIpA{5#R|{P;}|F$h<;gXeU?Xj+BG499PwGbMyH5)8*<2 z$&)*H4)<%0p`L3XXSb|i1vqF4Q8t6ZaO|?*54hu&X3bt(bg{gm{}Nx*aTJm3-AAkN z!&8iboi==37C>sWYXbbm&vV`1C$2b924pt}q&&UYA~k%!XE)y@`%^FDK6^&;YBXD@ z%A+RVQU{kN1uC3-cMKjzH-vN{W~O%Z@gY^8C_<)fh~3qph1?;4JfV(V{!%jdaO=B? 
zl4`qo`e0d^`4h6Uy}Tvgas6ygPPwBS9dpd8RqB6sJgC_IX)KQiDT zKDcv2c`^rdUfVCXpr^;{evxqH!g9fkf__ntioiDe3MMoE9QtZ zA}({6fRYV(oZrD)y#HURg~PPs8l|+1Z-)|gC;PF8=ec+cskn_N4=^FJSa8Q6wpemM z*u(Jp#MX_}9iR!z0atHVL&yin>ehLWZ^x@vHY%xi5AL5qFz12mQZNuQrX+GR_i=2v<_49kY)ax=CaWCc>HbH8mVkq zMT(~LO~UFc-@|8NA|TM>vYmx5m^?TsH>s9WcJO-Ol7anZHfTg?^b*YXF7^d;%LNLP zU8FoN^8d76A}@UpR190Gx?LY8mVtDKKpL5cUcgv9xySE2Cvr%Ki#!YOA>oc%;Q)R2|PxrK|52I6LiO)&`OHrS{ z!#F_tM$@yv0XiIekLN`y$6r!4=vK1Eo`6qst(dPp34g8MaeoVlp)NI7=b49YbtelC zLi&>);yt$nb#$NrLbF2iCOFdN6IN{9Rt^FtAca+KOZTQ)W&Iu)Nf)RcVG>!^tm1}G zpq*ge4O4>O5mQI}CSSny3WI?oQ{1$BQjf}Z4V{QsnNnvPPiGW_Jq=OH8P3izEpusv zhbfMm#Ncc?f(n?`6R&G&CR|!u@%}>MWwQ zC=9vrusKXFN+wFNB2in@0wk5ygM@WGWAggF3a!r^NF}$s`V10nmI|lzg&CUffC+{3 zKCV>ptxK4|{($&>jN8x1Xn%LVJ6S<8siuN$_w;*3VU_JX>eY~imvM^}*H@+75=y;J z3Kiq_Q><0Y)2E(b=1|}@(zSl=b|L0=&tXh*)R%fUVuzQxtXDhhndVY>eMxBs;fAo) z_5wHwfG~+Qe#jKAd`y)}L7w>cLn?N;tDZ{psDxK2p33gif#PPO;vo$crfaXGaKDe&`7)aMxc(xa`Qat~9dI~t3t`^TpGuObbpBPH(R{SZRliL=bQ@5_^4R1)aem=b81 z?`QI!m6#Ur>}=G_eiB zBJkzS@B7Xjdm^K*+j=7PmrcOVF8q34Pv4}3IWPIqBW256uK=09f85z%T!*(NNY-e@mDH`m_8_iNMi{v%?~Y0;!}!| zX-WDH#FDzaP3tL1(j+vUG-gS@oEZ4=&zoR*WKsaVI@&;zS-LvT%uF2IlGY(c=TsIXN2L zN>dZ(_HFjJM0ERojTY9Qkrf`kmAY0+isY$%h_@~mF6YzLc91ZFU~{qrZ}MaN27AhQ zDnk7X7Vp^MY@6}a5N`s|QDCacX^)`v-ePdmeQvQ!TPVJmYxb%vzqX+9zHviVoKR_=cGC|3c1=3%iE;P#bu$Tm6$q?5?HLpj|J8PC`0t`*J z5sv7CL>0Nrl}ii6s7mzujA=L7YN1+sh1;YuHQfpgH0S7VK^z2$WK`h%%|nOJMeyl+ z6fQRNj2GVktrw6&mN!q(zN&T;DZZ`NuPI9D_ir_vE&cdvWl3YR7A%^$R({0nB{ zf~8liLwIz(#s;IM*iRGyo4j@wvqwe+-=LKvKXtV@{3I9DkdS_`=2DL(A*SJ%o30n) zm-i=Ffw($_S(Pm_N(Qo$WnJ{6X#e&@h}UFuQ(!Lo@4LOKTXT4HD<5vDrwu7r-0zaY zDe+t3)@FUCC0@CEiLqfzbWwmFpA#Xz)8 zS;xS4$?xY~HkhI0XSEIa0P`s2L)$%z4w99qoW;s17?HMXc?K{gSMeQkPGlc zMle&6mt!@JafN0;MSx>9{qupiba(Rk4v47GArFVe-^8d449F=|6k?d6?rahd)B|tk zAV?(GMZlOJ3gn_C08ZcD<9omO4F2K5)OSyq_#yj)Z0}dC!7$xn)`uaEVGRYFz$^Fx zusr^%iUt-9zZlL9l3)}rkewe8b90JGS$Qvi85+|LFk{;J<{0AT4tjYSmqZbl!+af8 z*kz9W_r(h9{Xnyu_TaIZxOJ^%NfIOa1KXX?iLT--i!eUMTo_|kfz5nu~`Sw8f&ROK3L4i3Nmq~ur7ukY3 
z(43*TsjJz&$;sI?FvXQ9SY>-H6_OsNmCM<$6C1hZxc~G-lE5Af9G>|DNewTy#L=4Xn1s2R$ z0k67`tJ5mj%|X`JBM9tHe@r@1zO(SK*b3|Rs?@c+O^})@ zFrs?C4?!N&70xKM^}Xxz#i7N+S5D|N&=&fg$mEV=p*6xF#%5e%w@G?Ts(l!yHqG$$ zBkARYVwJ^6vEEy{KN^*f^U?bBc!`Xm-h^X`_ z>p_|*4vy7s+DfrL+y)PqimG%tosW+#;dUXjFisc(SG?E?1ZwIgXEAh>Ark7~c8C+I z^DQ>|ZTeX(o*I^jTASY|Xc@}jqc6fMS^b?(Ak?YeXPR3tUhyOnHt;_!K+04n)&^IY z6)*NR)K~d{;bFftod}e|qOX(sb1Kf0Ohy`3vXGemnuWnGpGauM-HOR0)|)*} z1RZ-pOmNYa)0gNf;;N#N&m*ZY!NA|D?#C&P48n~sNM0n+wIh8fU_rYAbFWntdzQYN z8WtIDE%>LVL|G~-!R5?P0{=hl01~You*%t_eXHi_U;t@)DdCiJ=W(-cyj`58i11T4 zxW%CA#fac|StR8%;fmDU1-R`z%4$G|rrRkC9;c}mO#Mkgy`PIi7O~{}BFsvAiuV#= zN`$1SkMXfaxW0LPgzIaHDp@7;$@QgpW9czmnsD7(r8~gtEonag!Tc9suab0ZehMZz zL8Dt=x0dosWnNZ+nV7>slV8f3%+&*#>vak?4pq|wI_%ke`Oq)7ET2x%Q*?J}$`wAc zFma7wQnCV)pwkw+T<a{f&d4WtLirlX*FVTJ#IT zVk?mqGnhOfx`|M29&@AE>t@PLEjmJ5$V}j^NS^Eq46a-QU2@hJ1C;SM7vGfCj1AGiEM6Ky(>=L8|z4FRq9?_6L$$@^9g8? z>@L?hQgX>by7~$}E$qKe&1@i`lID`n66NadmZG~$pz{j?t7-q0(|UY7+>XRVQvd1$ z9t|#15T=(ATckYv&gS~RX7Vc;f49H`L>k#jR5w`kS zc55u_hiSRyir9m4VsQvsxD~U++#O4r$6`^^cj0o3Z9p(}NrdHQRPZ!TRDq5IC#C}o z$V#*c6`l%2$^{ez5P2fr$DKmyMV_XiC-8qCuKBadQm%}Y-Tn5_k%K7_#)XI=ojnT9 z_aIL11(vspnC7@+l>Cm?V9tRLv22_Y!7TLli>}KpyK<-@&^q;DOpoL=IF=yK_jA^U zh4*oK$%Zlk!e{sd3-^h#E6QlLGdlMvO%P4B`RyGQ^?Kkxc+vqf2Nl^K*wYO5Amb(o zrjt7CHv|bimv{d}$TdPyi7u>^bb3WPBDaThl+^DSE30#As?%jiYs75cWu_v~o3Z3y zao#Qg3H4Fp9V(+aZiYW+w5Vt)h{KA$QP;k11q=tBjXfKp?8+KoWc9qY!fKhI6i&J# z7I^bV*TLsw`${8lVFCK@&P8Zv=tcaSh|~luPxgY()ya}HTaRqLQ;W7utN8xA60`g} z`o2uvBWIrFj{)%!8CDQv=~Lqdw~`1CvGF2$+-`n;OfAkj*XY}%_F0Un9(;;pqD0TR zF?~{t6|3^S!d&7+?{Q6K^&g)xcM5r&{dl;3`M1@B_W^gt!Ena`r@uE7TIa>wnh<5T z1eYSfOekb6aNZ_MFwQhi;Uw}s-7R;xe~hJY*u?tBFQVZVIeP!9eJ4*!dx0JsF~*a7 z4?N3>Z^sqgTKe}m$`shl+Jl}+g3qT(2!HTI-~42RtQ&W74zLG>9|YcPP#bnQ=dA`| z^$PPVc&k1+ix*v@aSJE1#~nDwy%YzcL6g|rNolt)oZ_Xb6vD0VLaaK2zTpuEbIQY| zyXy?vQ?#Y(iYf2k$$nFq9P_!19x*wEfVnnnyVCU1odgJpR}}@jcwAxX#)eT_W!^EU zh3oAQ^1GLd7(C0OzI-L>iD#ugL_U)P=z2U|1LrP5Yxx$8iEsL(-M&v0b>=vNt@?Ds 
zd)v20IX-MT3Wr7bvi~{|8q9fRQ1W(JXuhA%)|k~WdqP#5SNaB&CW8xL2L$~iY4M#9 z3tWs?OTMKe$YI@)KM~&vdIM9CvlP!X#H^so40}t9DNcp~_@H-vW7Xxm1zt-5!Iv9Q zZAQXw6A4nrCIxXK2arGMhO{AaX&GS(soWv03DuJtL_%>)xp%iWDs4!54?|s8jiJ4` z@xCPnnC=|dbPhu!bncL?(Atf|1BwX57GmUOM+tS#k4?`R(Y~OwQF^s_vK<(zEU{5T zCU$Kbm>(%BMt^T%B3Q|%$hyVSn$WUQv(f1&x5!oLv1F_+$Pu7MZiThEJ-1!64HU%= zi4Q`+KJIcFD>NW&z2VZ_2S>CH-Q4kp%N(?s{-z}wLXR_qrdkB3YBawdXp&!5fhr~ zj8^e8ieF&($ZgSuJGt@f6MZZ}+CyD+9*MN+^Klub4hVEV{CDOH?0aMMwsFk~y4W;% z1ws-0Ge7Vgl0c?oF~LC}qz{Gos0Z#rHho}-$?X8(*%I(ADjc20#JDfa#*w~Hr;4K! zUDjbVLAqYg-Q|7GhZf{H1r3~*>Xk*R%xcn+*H`P@;8Y}c9`Vb~2+8rj?xz)epVbM9 zfBDMZn2Rz))NyzriSCxlWw*E1^TPHLO~RdSV321|caSu`6lGRHqh0`?S-rwVzP9-I zZgKUnJ8j3a#6?4OWG7hy9_c|QP&J`8Sf0A)cDDH97@8Iuvr#B$uw(*$coLPB|Q@?CG;2;Yc7cbf0nS-&!VcJfiSzeNbSY^lox2>vxq!HzJc0ED7kR#$pA%wjUNqot zJqjI&hRDGd!4aeI6(cQtX(5Q_d6=GJMv2`34xu3R(nD8DUCNv2vpffAY49-hK!ZvB z^^n+O36BAf{ZN;6$)%>C6aL7;FIIQEer%)w+oO_V9X%>g0cblIG5v|29C2Q4t` z#DQLp_^x`T7lD8NA%QF&DdVn=SquL&2$G;4>a*o^+O=`W))4VAifFmyZydu;i6#|O z5L`ty`Z&C`(Ye*Y7cP(A@a~^x``t|rx2#i+TAvDAh6APSLx;37L<#vYYVglwo+5oD z4hfq=s1|oi1Qs3+(-hC?aOycXJ%P(d(I06R>op3S=S+Jm4rg|{)q1Z*&oHL zpMF+xe^lRRW0QyV5nMCNDF}O6Nf28T&ka_CR&sZT4SOV>@33;{y{!;07!-w#j_##% z^+o(+;cOH2_UUwdPG7~Lv_W;$rzQ!H; z1TVI)n#uVSJrmEH>VsZ|w}WrLIk^cyQl};9hxOb;uf>jX^C2N=`#?QD&TnU>)a^** zlVTqFT38Xz!4hP&27m#Nu;MU{mtdu6=Hez+SR5hi7qOO$qiPaaBj_g3r%z2-%;q{w zBnR%FbKdd#=n4nJDGr@bve0xFd6Z0?^0N{xw@k7=B14sCoSXQ%(LeXi3<5jxl%e88 zT_n?Sfv=61HT{8a3kl>)1-N%{>qJYLx7x0MeM@ijU=H!4UfTm(VBOHy{IY@5>?d@% zqRa9&nA=+#6ABe6v*ZohwS%(NI3!{GizcG;K^Bd<3lPDb z!TmRX*kUkboSnVskFdq|Q%ciM;UuG30y==Lv5nyGtOkf}H8IOMiVUlNAD+2xzIpyV zAr%hXw9Jv7N%?B71Wi$*20&mlB?cP*G|rnCvS&;{WfP3U6+;%b#wA#kYHSXya?>u2 zz~(7As#a870>=Z&GFGtVbhu5KPm>ueg3@#&RSW~c@b~lNP1y#(V{CX5G|VyR-NdnP z1$ej>AFQ1U`@Os3gr{C2<66zj93;%#<2`of1SWX#PQ{6wsuzKw*yN?L710kq zYk`wW@bWGFQwTo&)*;Rg`f#hR++RS#^Fxa8=*Ol}E^gS(J-ieh(IoygFQ%=y{X~BW z60VsDDjxkE8HRps5J%v_825pNKRAxcQFW@?q@IUMb#@hae+SQncX?5!GbS@Nr*4#Z 
zZVdmYBZ!dO1m!JDTt)s8d*v}r4^4EKj#v@tT6{QEn7{&l^g)27pXAkNWkTv5{}$o` z9g!J;y{7hSEh#%KWIko%#9}N?{XWlEuu>}YDxREuTh(IxO3o^_ zD-_Ww0P#|l*`y)JY{WHGvGm$}1fD<~ta}Rn4rHma4`=et`URQlef){#E+cLLIBFGR zx1;*xh$M5NVTx8yi)5@H9&wOmn~787VZ$91(im^jRRnuN56DIET)lw#@rgXHLZ4Q% z+}DEEK8tmQPg>DvMGUcjcY)#fJ?%YrK2DuM#O0wPDVO~&og>Ci%*)Yzu(G`qN%-c) zGxH#p)F#UM*>Fp@nzB7ro#3yG-Jf%qp&%&xH&?5hIsiP|C>(kHL?Fu+e zAj?R1lSzXZhe$8Y@Is)IVPya}NmBtS*5}CS{k)%Cm_p?OY?xURrGI55Hp)d~VMJ$s z0Icgr)*LNz-_EUWWANFZO@xvXM6Z}2V3|94Ve7b(cSI4L_lC57wBr@%0q1m6v%B5s zVpe(t3(i#X3#MbD6u~rNz+qt~z=1~e`Nh}CTJ;f`RS;E=AE0@dd6}9SxG8Id^^^)3 z6G(;1Yx;7OfnaF*aB>|;cPmU;cwJp6rXsw~)$0onVm1V=W`x`+OJ@K%9%cgRUkDxS zwwMemJ81Q*(U?MzcK|@;I|TY2^->>${D9xL%=tRogRU`p7e;Od496oD--4N27!z7i z9wxp+uIo^y<=nC-0t%CMyyd+zK-B*AwKnWJd^_Xp9=IoXy~6jjP(;1r{lXUJQLN9# z$K%iYsMTwM^Jc(R%k=rgDc%$SpDQdRS<9L_=HE3@B_R9+ZIgN;GgBo*$yvq; z;+`r`*iY`nNW&;-!(Cb&p$-q1xXJ&9CJ$fZJeU3Z=N);FsW2-xOVe!=UgUu6KAtd_ zI1hd~y9s1#cd;}vjpy;B!9h79@iDCJ3!pk31j?UvKy{qnnJ$ja51jSbX3_WDDdv7@ z&iUQ-#{cYWw%+E%)_WzKI<2+@Opnt!CS)8TN{kqr@|#ARb3dzFc=ZxoHidpok7$gT z@oHGE`#MQO@Gz^m%mU8R;~0h27#>vJtp6|&{Bez8%cEw`y&b~%-XMps@5?v8D-I^>^$0##l-(aLMvA3SJnanHDzWS($YCN< z%zcRQ>FIO&rmu6xeTqdDj0Nzr4+GyKDxy@J8vqMZ?;urED@NQv5ZjFflUM)qFQ;$C zU~6#?XrGvfI8WfeIG-xCzi|=$9g&i7xy*KfJVSQC*D#sa@BMryTDtb~S$lcG7_Bgo zYxlr_9?J9cFkj1qL6Lw&R~hR8sArrv;ga0f6g4z7J^j@7&l9+ zgf;`y7C42qx^?Drpw0?QoMDO(Fff*Egx|ZPcTN^Lh<4m%tdazI{p!>)}(8-c0 zFc<;tq1Gee&Nk7t-4GAr32aW0npR)0Rypp@_)06w3I-Kl=k1fih>Xf z8by&1S#F(u{1E<166j{&jMA9m*j>uSGOkMf$~Tv@aEZ`qgUHuj$Ex#<7K#-Wn+rTR zjd0QO5jvEZSM6`>E%f?n4SA~&Ewhke_at?90?`4&Za=(W%>B!VE8AkI`CaBkh6?N0OyV$6h{ zX%K8|O5;oU`+hu_xn=VQv_^bOw(EZ!E~Kj;>DR-rBA~VsV&EJm3c`-{;{z$hNn}xa z+~W#jg&Frqo$e&opGng|G|i)1-*CdR?ecUY-33Wq`1uaoKUuj)|78{y_yCu<6fH(5 zF{N(E7O&#xxPn`rgLe4Z>Qd-u3 zn|>K>gB#`%aoA$sKjoiR82Yt>5fLdwAu8@sL~NpvMDn^EO#XlBaz7VGbMve!*Qt=* zLJ2B_;1SbNY6jHplHk95aQJh%qvJMC)P_p8W497R@OL6B5p_-#EynCfkqqSAZ^k>= z@M#DhyHIAXDXdycyb1I_M#bTW-@TkTKbe$h-Ndhq{>{Ts*Q%HM5?RJ7u6dozG&5RW 
z7e_Ca-^cn;88Eekz6P~sRt;BCE72xVP(qry>cGi_E|L99uv;DR$#znrP>d#px znjREf+5q3GC)qcABk^2?(@&bs<2NY&GZQ{;&HwHeN%gv4LNKt8PvnB zlVFO-35QuYJr}ocEP9r_;Fw^xG8?yNpOCC(tg zxRx{(8NGi}(5E3KwEyC~+Qg@0U59y`=2A~{Jpa&xUIC(T3|5ZvE7{Wvwo3;TB0a&P zsUcEzbBA*jzt2Bc8BV=Jk}kiA-YN!s>YR_s71Os_C}WEp{Yh{zN7%&Nyq*D|UI91W zEQ%x%5eZ9d^F1f&-Pp_3O1R}X_EG1}K#uHRT_mMCjLxxNc-9yJ2o3-?+-!eQ^6x21 zW6$ZNh{Dn(Ph{=co~&o|y2dsiO(L15rQXl15@P87iIdc_RuW})bBPI@vPt9A>dtH- z!~E)Y+BbXgZ4tN;h9s)$y(Px9?#yr4I9LJO+bH69Rg`tj4bKF5J`V!DE% z(Uk%Vvt>>54)>zi$sf65aIz2uBOv*fMdn0Q3tD2nk6X)LIaMW+Vr+f^zCUE1S=f@! zVMJf9TGt044ZV+!iuacJzh~STkkYo9C}@j1_*pY6^}f2U|5LUcS&v#eckj}uVWObY zEjGj@oqm-;!E`w@qX~jkz}NP;Ku2jH=ie6^A?Jl2T(^2v%Eu8z*7E97E!FZ4s94|d zwVkF2J(+-yNE7aD@b5S3>#}nuU8s6B>3}Gx+nff6QQb#kG5K=r(1@{fX5J(7lY=U5 zHpN(l?{-KWbR_B7Ts!cl$mGJURb$lavMOuZC7cP(u@ln#r=S;1m}M}{>UdhgJ|yqT zg3BH{@W!D;j3bMqYEFC1%g4YD&g8OLcax$&S5juK&LCqMCS<>0FiXsx{TTh%no|rq z&;5~fd9pPO(5xi~zmGSD&rIwR2XbZ2J8vz@3Io}=Gl50#5(=lIe~uEkFW(c;K|K!x zXL;K%ESmuRzJarJ!&IaJ)Q!k?&Rf9aTT2XSs7#T%1^ zvCwf|N^L3`!_fo=sgOFh~wf=SdEim*%nn_TfN%iiCCE2Z~5a44D5qIeS$~ zh?Dx_4{cZee#^Le-2UTcGHvMIdW0MiywlVkAcv0uN|kG6pkQotrjp zbD6e!5iKNp3P7=2o9Nptaoa(>43nN{%88Welpan^>3#5y}J_ zS@q!10;~d|rxz~v3uVchFpE&@yIagb^<>ZP(%W+DwW##=eq8oOR_In&UCKO+Pd#}> z+_Dx&27gZwK3rw^&Kg8f-}64UUZx(C{p;s>YV0woT;YOWZ24}5;TuFwWc53uA_x{A z_Ej8@ggHaEnymu8b$jC#zgO^Ki5@fN))>xaz|tiURT;V{Ou8tg&eC@u9rZKK63i4I z2$cBl%7~3@K;!1L{$3vhHqev~FK)Yul21=m>hq0$d(uR2^`h$S7JBT}f-YADb%@tr z0E)5XvKYADcXW)C>^8xlE4>-0T1R=?=VC&Xp7^nN8+M1-j^&m&(akGa@PD3@O16rN z7b-ZfqvK^%eY*GcDDf69WJQ;T0UgZB9b#e?aMz$V|2B!-ndbW^lNWfbk;}}Ecg5)p z9fAEY!{IHO$yy9>J#I@)TMmu?RUm)tIYIDn?&J4^2)fdtjVLQ7jvf#hA*>yCz68^# zliU=ek!287e^hNyQ1NKMmTt`t&=sG&HvlHx_cW!k`7w(v4X^&2=)L^+(_9sAWGz;e z-e!U14~^eaZg-*p#3~uk=`IyyS-vQ@JjI5(H1XPjD_@(>uP>N1P);b@)0A@cYf$%8 z!L}JOA=y&Q45f!0GFo^P8e0WyEHC-hftvnttcKHgVC#v+`#OPY!#y^44t4qp?V{VS zMVyQ(FpUBFO1J2}dY;4^zd`QCU*3vvH8S(+$S8%;2gymxfqzYkVvKtxPgi64xL2a; z=PMo|x8-c7?)K51?Eo4UFCjJW7lI9t4nOg%z{qK-wdu_yJZA;uDJP7+d~f>S55)WT 
z1Lav0nIoW#bLhczrD%(afv~qq1Su0Q$gPRdB;Bh|mJ6DI9*+aKu_g#E6@TseKzIkR zB8)tDP((2%g0^J&MjK#TnfCN}bJ%I7Dmy}Nw#-lm@F=ay@hzW+3GRy(_khq0-IZ>^ z&gP$VF-KZO$l<1~+@huh$lLd}=oUEi7I!~5khW#$w?YWt_I2rk`tAAs*TJ%xD$=no z^45tY!Sg8{e^9?=N?BfX`vt7Uiw=Q4fOq%xT{FAg_?ojh@R6Iz?werLsGWv!pT?a?ST0!{Ay>)&T3;HXpBaNwqT9LtBT*#su)x-OFEusl z0gv%4?RH%m1v-k1+kmP~`|ZJc2Or>mZ|?2^&%zt66#&Ng=h1`G0_9H}(Y}vf=5AIm zITbFI9>aFjiv$S9noRP{>)h3gf}-cua4YJO8ecK;kXs zCNa&}M0A*^ET?gnm6k*Rh~Lf`o`jTUG41P$1{vixQQ9$vW5Xtc>W`X#R_lax=~Xo( z{l1{{ZJ`IA`0ompfWu_8Y$;LqKP>?9+h5#^T+Oo}_xqv6mm(x-K~!49oZNSy9Gzql zSeGp2ZmLuoGfr4d7684Ndjl$W^q@}Q&CGptzh{R2J%@A7%k3tL(gVULpnwzpEj2vK zf{;GQ;yXym{R=vKBcZ5=b>C>-aXEvX#3Q{b-SQO1+c(sk81=)wte8Nz_pu#W7NHgQ z?AwQj?!J@!3Xl?de)FJBrMHIf-Xtdoh!2%Z#;-&R4CR0vv5)q3G*P(v**Q@dn^Pc^ z6Ln%cs*4Xsf8fQTz{u>U3P+4Z0xackZJye3b|*3=ggXvvdU;;amAG6}0!6ZMJa_{> z|G?R07wuTnWI>+Td;T0kA0{A6ICh`De(B$wCJ74)&h+lk)@REyV8@a0^jo_LW55q) zVlo$EPb+YYcxoxH*xn5w3xZ@z`z8SLoJPEbel}fwvh^$s9kzYDk)Q3+9~r#oc3sN1 z^ymS?%D*&%T^4xNIF^<9)w3Q8NDOAj)Y;$tK-LL0sWlJYbZR*nlk``YdZC{*yzF-p zFX0?#9JN=6F7v_gTdIIK!wslSZ$0*WoGQdI`Y8&&0P#?(ME;#ZPN)*%oVI(_gc7P{ zA(m-uC2g1ZKnQEQ45eXnwJ{K%ZZeNPyD-oe1hRnJnqZyw;X%46Ao_OXgD7b;aExje zMR8Zs^xk;`n^`5r&^0jgKTF>%U`|_-#-fC>uoeT5UXEizt0KCkhC4#cvqo}+?^hZG zT%XOh^XUwdmyUrTkRn`;CM~T_!&nn+&4vJ3|7J9}!gFSC{vAiUyiuEvCoPBy&GlN1 zZhmFdXtJBoaJB#isS3K|oUaBqP(OrZ^K>!~6xF2vPhyTfQ1{=(l@VaiANu3sAjHMz zc_`E#(p9kC&nTut&bS@twJfx*{LC{M{P*(|;fQ~4<4G4w%sCa`q9Um@E12NuO+_z@ zkN*UTSDc<2vL+6?HPuV{jz^KVESN#c`@gUe5y+2%^iaZ2`O65*GTw zwLW7X01kgLt&N~p6d?ktWq1WN$V2J+51_C(54ONvr zr;9Vlpg?n2BYdIk=63_2`KTnq$^!uKf26f;=whY8ws@l<#Z1Mcn*{jq_iz4Ou5E4Z zsS^eBBMt!*ewe4Gwa_6G*mf?f)w7&H>czC?HVBGsv$J5#G&d&USOnW@8-M^gUY6XS zNOI6e1$k12BuhD!nM-*5Fvqp$m;2~WgDDVp%l!rrn2QPe(GB+&A@xIr-*l)x0AMD( z;$?}8;uk1Yvc{w7hx=MTEd<@9Za+zX0Z*;!fy*Zfwd-*uh6d;dLmVb z1&)CtE11XU&|sy;TLtf+iPh^%FYc z<;di_E3kNBbYb=~q2K?Hc7k#{U>yUa4`)u_1{;~?>qDg||Qi`>i7kOm8$+Ube z75}ab_UKVtniRFy5g;)MWm+99wc)S~(LT8I>P}(U0x)3jHDj<0HZ;ts1PL$!_JZHd 
zyj?|OVNx*RN!TGEG==+o9n{yUHSd~hH~iVg48?(%8@ROs`R*~uBqX@JY3|Anw*^It%)j6J5%Q>H=^gU+$ZI|{(S)uTito(J_Ju?lk^Y2Bq_fcoeJ{w3 zy>+z<;*}odQGo8GZNH^WZ#Vldo~F_pc`W(sv4C%oszVt*^?6it`7?Hnj0VS001#av`QfG)&!5{%uJx7w&AcIC*Ml3Poh5_SHDC!8`Z4{rWNuV*xoTKA zB+5!TB2IK~Oy_MBf^V$^LK)Egk{-#c7f610=_nNiS*;I`Dj$uKXGK~5!biZ^wOX)RyB@4p(&p}Rd`QEYW8!%ay_18`>v8z5msdt==ZD|BjQpK;=js$7@c&2E zRR&b`JZ-w;lF}`uq;yM*fOJWx0@BiP>5vX-X$b)V6{Wioq(M3arAs>Ay@>vQ?+3nd z&pEq0JM+vl71faxz!fxA{bEA7ynYFTH=r!u(7lUEM|--_r!JaJ;U4R{2YHPZXn-Jc zDcZ?>`mDc>>i)`>NC@;9$!vVVV++t!&e~xK&LL_PIcOm_jafA}X!YVjJnPvTvTpbc zKEhB^o@cRF7XUY^5#dAcchVF{NQ0L0O2vdeZO=rxky@jn@7xui@p@dZUY@-ZI?W8s z!ijVBKg1jqIWNsH{9L}>ma#s2WR^qDO|i3LYkt2mll!iL1%1f~z(dc$4;UebgC+{0z(#q?`&+Hb{ABYlA7?2;At(`jvyplOHv-!m$X%&$V<1$ zw_yjRzT+OK^3Gp8kY(+fJJE~DBZn{KWYlISihA_Cd6CT7CY|pEPeybi3M6Jc7t=rU z(W5H!J=0WizedU`Cs6AWh2UTb=*Lo}p<07xE$8 zAX^Cqh8ApyXj=VDU8<=fD3Dg3UEu>17G}o{5Altr0!($`<>@&MZS6?!!c5{S`2y=I zmMN_DjDg22cI*SUETmK5hwsUrobE@594rz}8ltH%1&R*L4u4k7lKc&Z0iiz|FM#B* zo0p&G$(u6*Y|$%Vx9^X=A(umTmdg_JSpi*G$1?mW`nhC9PaVUXv{H)6LlupXiPKIpM5VkDCpyz zkY1$JNgkGhswfi*f=NG#Ainy1fX4nxAVLc^c2vPZhxwDJT!_RoJP9JXGmN>%VR96r zMTRQXvEFZvAu=8Xy-7cYiP`Sc`aBDz&;F3Gmf~#_)Q&4s0)?en`pjGOmxIRnntb@^ zaDoy;Y?y&qXolypw6S>*=uguDE9}V%$GrO8i-R-!wV!SWvS1?8sE6aYuGMpY{C!*X zj_-O75D5!Veet=<^Ec}(wb=pZV4W;RG2rKox)uNpq7|I?H~}w%YEt*sl1RcD+2ly_ zrLW1ffciYbvfR*Qdq71sKFIZK>vIF6%{E$J9gihCOd*N18-+@T=u1~d9KX&IzdTIX zDY3PtB>|qSBq)RpBQCi?@;NfiMC+1MPB24%LQM47z|1iCV{1=fT7btLi&vj^>7iyJ zf`n@BL%A}N0vBz#;-&3Sex&#%co5rZ&;Ys3>^8IC<(GDGlMIF+VJt`~;tpb4la*52 zP1n9`%(5lq_7Ny=a#1G3#RPFv+Vm6DlzbM?KOF_=WG z44_x_#HSOGtY5*y)P-8((+5LC{$a2JMrq^0i`@I|EAflXMQH%WdS2+CuwinhQxZA` z%{l43GDMnG#H;R+jZh-WN68s0H(~fNWbxo-3-TSE6Qe_%g5UQCAk7Po9(gdE0c&l+ zYN#*8C3iwh`;rLEbxn-MBlGJF<8|l&yvlcgZ?<_^)$363HWTGE*ub37QX$9tsCGdp z{p>bq?GOazUucUmIy=q&h+~xxzmuVgOTH0tn7ywAKZJO5C!0ZY(bMcd@JB)l@Zw9rvT_k!OH)o7S2(SX8469@oeN%s1 zj0l~x<=|@&e^cj|EXU*S3;_xAr!o4V=*wD1mv;;eTZ1v#V!75~h>ld)GSd5K%BomQ 
zsYo95lK#lb+m4bWd@%sy_JUC1o!;RO^ICzdvtOO$V+(sFPcw$&1V4KnZzxL6fF=FnB8Tid^7=# z_%1x*Aht?Aju_DsI~^yQeoxUtIrkL=^)iH>d90b z@T}MdQA0uwt^4=do@epG3=r_a5MZb`;9++WraOEO(+A%+lvDFf9a2yWw6km%I0F2- z=TlWl8NQwdziaiJE`E|~@RdOBfMJv4Gm%A3ll`@>-=7!uz;be7G%&0Txt!~J4t_PGz~6qGGwBj3}6DLpnXB2mDD~r z!rgmg^9|SpkVOH!wO%ZulH~+)-Q5cygw(&Uar@m*BMF0M(uMLH}Gx_A{WsbtCtTbI;VM`V_pB0 z1>E4wAxL)|T7}!W%Zz|-on_SO9OHYTgLtE z((w1WbFGmcm-sJ_K7K6PE7bR0p@pxt&~ZP<<%Vg2krFqw7UFWJq|!+X2wDYQee#;5e>Su@(MauuQ zKnHCNGXujdYFFcw+ta5p3myZ;jyEW6={d6s&=uzD#K>+Wo3e~zjp{4f6-)q!CSq&?gD5@gT)6jR7@_UT; zsL>eN4wNe3A9cf;h34X2C~~El8A;VS!o(jDCjM~`+aZjlffQOnnoS@_zH24%Axr3R zrQYxL;abgm*k5^vx#|%W1#r?eu*^oI2suswM;fDAfUgimS7>m4G+eEezUw+%4)e80 z=_DN4K{#E1ZV?Q}KzFXNA&Jpo?P$(V!D_$bL3JuB{?*ap0+&RfPJ@nFiAgbqCV^2& zXf+6Q2GD%z2WG&pYL)!ni$*>b{y+Dgl8NF0F6Tvn<)y<~O4nuoZRqb)Tq4#0Mtt zK3BumMc#GyQQK^U2|g3dlZcGuV<2Xg6DgZlPNho#P0q%mNzBXRu5krnKOe%G#j9{6_Kx_S zJd|2L`@K;}#M5xVrc7CA4GQ=k^YFHIfcq3|ZqpUo48mWm6c4Nh5`T>p+Hg^0w0{YK z*J}dB;lD@KlQu1IEU-8aqkf8t^=~}|5*y2fu-Sl)&Zrg~(JM_+dHsgp{!Iv~7rC=) zj%2WlAzgc#iyZS&_e4})zJ869;|7IEqioFy$Ps=1(?v}!zC1nl(`N9c)NtifSU0g= z*<9pF+(7aJ5t8p2cJ^;?9IMJ0VClI8LW1r(cmryJ?Crw5PHE5Gz~MarNcyEn-g0$* zc>D!zy;IxJLxw+}WWZL#!0bc#+Uq40FgnpJ#nUqRCTjfL;zd$Orfg*%y_rO)2cSru1wfAx@7V%{BsEDk!@L-sd*L`RpCyunOL8R6{eb2? zPYKO)W4tW5h+11N>fNb!5&pUOd3n6Mw=DdhPD9XrN$?SYt(THs_lCUOkzqr<7u zkG5k9tQ5aX(}uZt0}D$xt7Sdkd~x@cr)C&8!>Y%iuR#?YHspCOXF zn62*g zxpd#QA88g>k{xn!_gc$d5wfxJmS|7VY4=-H%1Us%1tNQ}eYT%x-*|L-*hNjo{?!)- zqNZW)?_6WO*}3Qg`PD!ApbfSuBLRi$3WL(>nC3SEW~d6F{0NqEVIgS@7>CPiMl3|KH45|<9>i%H6u0sPiK0sy*Dpmku&UfvJeN!U&Cpy_{NXc zAkor0if2ohx((JzZ<{6H{Le<_N!L1{=Y?OifyP^Tn{GF(asZ=-sn za{@F1`pD=d_@Jw$i-$x0BQ0q~2xu2qXXp01pk64xhH#?Q9o6i;B$^C;2ck+FkM)cW z=yP1GK;k?(nP3j|X`C$dqACbGZ=JqBttPT2>Uvqig#AFM&_N>SSvz{vK~NSm^&{+u zWce52;U#Cw*QIw>&|Nz$>0Z7+%sMy4)~|5L*_>=NaNV9MR`A=`u0oaF3dfEoo|5my zi}1ojh7J?7Rw_Zdlbo((&%L-WU0! 
z%FhqLa-^ZsC$MZ$H(!wUnI_iK6c-wiQC5G>YdxMgvN5h1*q8Qfa`7h9V+-^{i_4)y zv%zmtM*jjM*Orl@h@kBP^7*<{(VT|Kz2e3=a=S zcz&lC<+;MjK&Mi=`6MtNAU*8x>Vn>5c%53Bjt>pFQ9jr|7uNE#8aH~>`1{L0Isy*R zEwXTj#zy$>=@jZ}oY&UhLa}ilm1Y*kAyWrqE68c^+nU3?a`FtFK5_!v&FD36_m52{ z`;VtCj_3D*AJqraGE?+46Qsq^2vd)~5>+2Bzt3O&6XP zgW;p>zyy5bTnHy%dP_HukrSMUs)=eXH|jZNGa9=-R`Q%z*_V1J>SY+G@c3N~u2Oa7 zAvD!Gryso<5wel?`)Oc|?-NRkAhiQ~jfedOf9St3EhGoooHMD*M(gwshduC=A zqR3!@=KLUSEcpVsTl5iUy)g{*gZaaC$7Fepp3*1WBabtpK&-BlP=}VguAM z-1}9UzGinha-ZUCo<5G7nbJ`Qk4vh_&Xr2n3#f@a)BCg*0UG$$A8hmb8M(Qo!m zERPhZdvfYLe_KHOXG-zk6QWF zeNT3dBz{+&jdU4jEXvI+ur(U{5vXDu-9Y=LQ!kBP^HBzKwp2%$7=E|y;4#{7Sn=GO zInWPoy)!N<738%?lJqekzt^a#(syq`USIrl6EF;g3n7su>x}_GLN6&A2Kwj;5Jzl^ zxQ=s~#Q}&I(`nxeM9B9J#!xG_)y+)i>hGQ)de`fm7+ES@)C_P7qu0?zo(=v1(U~2JzTp@(zAg%ah{S<6yrZ_caobBpf_Uv0g2kXBr9niVpEpIBJ% zq+q5HR59j$lGiHT5+e#v+9Sqa=IbbyoDOp0yIHzr3ui)~tuSlVe}VaGuS33`S>MM0 z%z^#J;?5p&5Q-FyoE3Cll?ScX)q~%8oOfBm-=zxK0i6=ek+FwKMffZruOKxVMIG77 zkH^s7Xv|6t!E&F`3~xBO*awhi0jZwQfEY+E;QmVz=o&(X!c2p`4G3SeJT1@AcUcS; zTjnBGUA`*xInK2G{zcXKsp&^G0on(=xXx>^3?y@UWpWHG7-I!+q#;E9)ao=9d;=(MvY(n^u#`d!)19R!?vw45lgFkKPi=#*aq%9F zPz*n6Sh{+UsE?A0VP{2+_X z#%g?%(dC~aMI)%Ic64>mX%|W{SE;nJYtj&$I9tcyu|*wU?_7BCz^|NvQt`EYuXe(V zo={#6I%lR0~v-d@%CS+079cM(qF;E;J znT$c~fx9xC5zL(DsCB|`^{r-?e{~VySnUI1K9OatuR}7QhA)sYgA z$^0x`QN?1k?C*oTlHTGp%9H4#4fzUf3u`EM4w>1H#2pA?F4MOk3}|;yymuNMfIp+E6-;d z%|fxx7Z9gDK2OlfIU*L6$V6f%^nsGllp<;3n6ssG!fbb}GoHjlgsEvMCD&Da-V+|3T?64e;MqQyHIH$53jiT|h`?`OmodOgR0x;j88xSp?k>K4=e z_7h$3D+SZRbcP&#nMbxx{__3-p0Ci5itN5p^a}YOtR9PB#Tq<%WRqB*^EVfumiouB zfM-hjY0u(5N3Ft$9^C5BxK!3*!CDZb`Xjmf{qStbrb8WzH+K7RGD4oYvq1rve3Q zLdluEnoXgwpAYhL`%vqDy>D6x{WYT5Apb=mhy{riDW9I?K`&~Ei6@}B-l>Q5j;xgD zHKvM*n6KTV&qvJ9t6y;YUbyp8p$E5Bm?JK~u$5LB$rkh1agHFDmy49U zT9ss}%F9Pz+Cuhb<#VJc8J?<=@mo@o9TM=H6X7iq2OXl#oXQ>HB_E;jy2+R{hjJ3! 
z@|QtyD|CV;=h z${<8HSgf02)=pYwjY?gnw_c|22~+^b_4cJUpWS43l0we|+3PQ;Ve;3!ZL`8CY(7wp z70=SU%w*}nzYlr_A{m;L%j39S)<6{kqQ@0Uqa!SBFKae_e~|srl;7d?ban6IR{D?K z@v@R<(u>R+*RhFtHRL|WGHIFd1$T2K@3LHszKeH|oPyeu(%~<~c}x^2(2bJC@69)O zB?ZlZS;J($UmK~09;lE#y>i87CBbmCz?yfJnu9 zJE$Z?f(*n^QUa+ku@oZp_-ILLY?WnWsLD`Qb{E@XdY+*14CIAJ;8ua`6-#R#Q;~f? zKGsTNt(Lb+m@TEc^q^D3Sbz{%oN(3i={jxf-H(!#T(PiX{ktS}Zad}q?}Fi1v+s1< zjX}Cm5ZD!`f|~?+F+6Ht-#1`=8WVE&0sk^wPmv%~OcVa_JHi$F{wKbASi0#>*1s?N*<^_KA}jkzoK z)4EPXENMT^9h~2%CpQex9ozF4dWolx2WJrUmXThpxRsZDIzHCjF7j!CX$9B;?!qDx zINlVKVaAF}l$U4e_B^-~?coHP9Rcf|KSK5-13weIq+cYyr~}D>Y9;26G|P9rL<&H% z1O>_GxZcyBeEQ|ApE3Ct8&4=%GyS?pzW6qP9`m}+Joro9#}`r0wH;#|2oj?-a>`XI zbM?RH2D7@A-bA1sM9_*#JkMP-ods3i-q-smFSXJ=h5V~aza!&i{g^@n@^elGM`Erg zUhgdHVnxy9%72XVkm9|X;W!XJppRzq%}snri&z}a8d5USpW2_P_36<44dgr;79t>r zFXQz28JoX3ecqJS6S}hd7Td{v#8dF2X)1+YrG{GdRhe8in|4QU{7&v;Ad)hV>PrJx zSoImcz5l*;vn{Y-O^PhI$SG8fn=j{BsA!kaw&;-pOZrLbR>%+DclVFghri|G?&*<1 zlLbjiW8b7NyWr7Di!)+K)=36}UO96xEXm3sRd!bbss_7Y;Qlqq@j3=MZ17XbOEMnY zqQ!{Hx9k0L38wRW;9bV_3xq7}9f;SVn^HbXaimTzUqL&lwGuF-+8XTuxR?R6QOiQq zD|eF$(9;>S*cRPalG@RiF{ZvC@DWG5=|iTIVbEj-jY4@GV}zy7wKqrHFCj5gIQrtk z>NCY$g%*;`a*lc2N}ycE$d$e^m+Q&3&v5`@RP8l0oueuA!F7+=2It1 zQICkQO>l$8LfAjH1!LDPVX|M~j^U!z@+St3WY5j`EkJDr74A#Eb#f4ojk;LTzlz$( z9&UNY$N;$uF}E&OE64}^LQ2N{HGcU1krN08Qc6=wVM@#$4nsPCIAh0FJQWqL%Ofcw$fEAsR_W0Y5t75$V_Hb+fqt;kT0-*5 z*Ax8jNT$|+TTX)mq48r=kR5)!Im2G%7Q=8|t*7R?6$_@DCq7f0`=YFNb*ZgZeZj^u zjiAkQmY3k5hTSt!e!w+WybLE(hx|w3gXc&A2jU~DG@ip^J4qe0Ei(j4whK7rG5t`G z>ZFvAzg#!xZ91S+kPVNz=t5GUx3JJopm{0zg>uDg5C1NM(OkX4=ALMzUbpmU5=kF2 z24P~*4Eh=6RnzljS!`tK(}Th7S#J9|k-NAC?A&KxecXwpwi~%w+ZU+dwXslsC>JVc z9@Yj$N_h)8@u-dt)|W7HtUzBjrCMkug+pOOOrpZY<}-a?W$cN~LFn1Z_P4U<<;F zMj=AeV{@IDNI7jj|MC4@k6FD(r#>~^KUzJsp;e4X*jw0y@ z=B_yzU;ApxaLeVYSiO-h!{PcB5o{5HU{3GMt6*PvWQim9!vGX%X7x`0uVZO^jrE~ii!L;iK z)T;{evF+ZcF57%}&5C5l24K7$?Zbryc)`Q2b^7+D#P>gTqhG}DcPTvb4-#>gU^uPl zZuFCh$9>f6e<{cM1hx1FtG403;WtUAEPZwSr~2$Q5yY6osarfZo&k&mw1wFn+eme3 
zuW;3f+>nUk=w}S=pb>L#$%TstQA55ljY6BsOp5wrp}eW!%RPeSsbY3oigaFk-+U6q zK$J$#7L~)E0a%S+h{Q+Qg(m{CPp?bGZys+>3NM+h$|)qX+}Pk0+dB-2PH56-RrNLU z4RXM8?9)7Dcw{@G>!e*m=RuyyRD(f;vq!Xb`0vYHU~aNYl-%eM3f&M96HN?BumKIJ z!6cQYrXUR?tknyvyf0{3AlH`DHl*)QCV8zayv4(&CzXSUjSEuYdNX$YHfK>&^yQK` zcKv3(gq7Y5t;F&43NGBAdL3w+(So8lfO)?nCipHJ`Q+!mZG^g*uIamEuZ_j2I-xCm z15-}b79dJ*ZV|<}CNj&f+30)|O{8>T4RQTeCt5 zs`drrFS`$ITxis;o5a5Oc(Q6m>^y|WP=O*4X2T&f8cIY()7XlB)btd|b6nFZav|%E zM5vw0#*trdUSXg>JW&KF^>a2+eyv(r7I{^zG}li?7fM{Go^ta*gs1m}7@ zMHY(E34dni;RvjUZoe+EiJ(k96==MC(qu@z{PbN+#>I)2*rk<_aq)+{UF6k~Zcjw3 zbfd&VaoE@4mU@%!RoYH*A6yq$STIk->^l+ImX?&5GMZ@_SWw(YW|sK##R5Y9ZSY-< zhUVL~$W?Wi)*Ek&=znWa%8lCHKkL0Vv0T=uIqGk+8b%Wp99)H0f9**PqDtnRpEtMJb>lO$*4SMla)saqDyB}yhlDs&~d32u6o=qNcV0t)J%&_`v_YLmHcJ0 z#^tw9RKMRN1_m-N2=E9z3kV37QB_LcQC7Hox)5+ssG zza_Ab+8UOLyUSgXo`_=c3JE=zI9eZ3J#jM%WPT6T6bqa!W>QLHdVBMvU+m4$Wfvpb z=$IRk|0hd!@Vr7;$Bn{yjNEP)WF!<&3?GS-WwABthHv-~WrU;WHmZle?WG8bp3KyE zF#fqQ62xGWQi5M~(%#;J+o#E~?CPaPbBJM%jl4``a=$DL@3j`NDi3Th=2F|afQyxH zyn1zlb^8n)Oz@1hSAE5*e+B1eGvb=#QlqazymIt#C6Ngdelc6}()o5w8xOt-Z9jXg zQx|uK^5&#g8o+)lU!{P#@n8b+U=3c7MjiufK%yG9f|uy?(z3K4b`FW za)u=T5j4VViET`IbU_iMsWJclCyi0NNiQJ0*b?_qYL+cVzMkpej@yZ7afxN!?EJnA z6u!$__xV7=e-Gl6mF6x42~Buf@$>rO%WLxeq_T>MH@K>5(O?ZZo^{y>$0@biuR^B6 zjLs}4omc4{_mU(;-n?(k;QzCVg<~WVhZW@iDK*%ctdvWT&~0wz*R5Z3ggp~7+?gp52`A*Q8n$91IP>=`Z!MT}O&VBU@7c@qE`p4ZAa zDEN;IONgOSluQ+cpe)RKyz8+&%imI3>4b&7T*i#>uNF0Piz;$@gJ>$pUx_YVN zRh_=d?Y9o;D3EwWy6J%#>)nNFqN&yL<}+HwmUrwA3YI^*@%=p@PwC#?gQF zcUG@5f7r`vIWlR^Un+vkpCyBoT>=5q>)||_Wqnu=gu04Oh?5%%Zpz$i#dvNQ{w>( z!B!BF;}vS`4#8y#fx8hzr&s?pDwHqDcuMblx$yy1{|5YgxjTre|5Z90JzqpXf zzA`#Dj;PhL_Z&x}RH|Oa`)a#yd%oqDjZ6zfk-;@l31S|t98pe1sNe5jh#2;QLP0%@ZJdHl}_Wq*_ zRA`LAwdqezS{1q7^qUvN1|pPvCZGEdLqGEYb8f3CgzV>o4}>p&GoPKktXL;APD)06 z?KAU%8un0s=XKq%OwU!vtQd|Pr46oKEx125jjb8f|2;S-)#hW`P!t;B=>^d7?SsAGTN%{9 z1I}+kJLTZ!cHJ5v2^bh)n2 z(_>0Wh6qTq!_#z*Vr$ggtixz3az6-Sv)FHVpi!iX)BFGoeV7zjAwwi%)`>k%Ui%13 zkYwRJ)<+b&(aL%+l$i}NgT&@+WlOUq{I7k9Ekg)JjBYv=oG~)Mos52tX2wlEVT9wg 
zYTG{Xt6$bNcVL`%^~=o%(;UP0;F5P>i}Z}DzKfbKxc#z=3c6`4x0&%fEYnA9Q}6(Q zh4krzwar^t0_<}+I`i}1m6Jg+`0G?AV@<7x{Y8BIiDvh-5ciOzOK2u6znL8fRE}$NzOvZS!t!qs*a+4x z?TH=!kX70xbQ&tTiWd(8$~am}ib9G?gP2)E9@ygp>7$E9du@^SWyN8LCe8AGsNM+xhx@;~o59;J!YAG2v@UWQOG#S_RnJ-?b zoZMKv+*n>;Ps1`j_%g-{>8sB^ZoXlXd-A`NmyuxkV4*i*E$xYwDs1m>D~gsA7`}&O zYBMOcS?cU;QJrzCNN6}<2h(RUSosJQ9)j{VI`67wtl`qq{qq%F$yS0q?P<;vy3@v( z)yaH8%70ffHWQ)u0IEo~jrf^fq=LF-B95h(4S|C&W(ZXm!JSekDRR?vPL z)4@&6qmE6y+HE!rI(KUm^uaa-ru8M2g|@H;BX*J_t%3sH(`x*R%!CJqjrablR>E$6 zV4@(PJiAezJ3EINe^tF;g4g-(*%m?XVUj~*5IE2rIt@F)}?+_nVv#c8v- zNt<87fT;02yTD`ZR^961QIvQ#CClkgidHney?Elku_W#=TYZb0Ao%yX6c_l}IM2g- zM$#3}=@;I9CAL0~bf#e~QQYc0NqY%V#&U3%av#1EXn*TTrbvSqGT|i`iai3_jBSKf z6eUSnijYu}khsNblh!>(3~g)-erf{wT{(RCy*U(BgTTpQA}pad`9GGX|2i?TatL-P zzta71ralhN&}dZ$o)l=emZy_i@V!oyoh|zPKTD$v(3#mdJ0+@bDZ=)pTT8=n^-t89 zamH*DKKzM#a@WiI?SmrZ@(em7dpfG8-v5MTw-Bk*#wrv(+a8T6{_RlK`R-*bC7a3{ z%ts)dhrbBI zw`tE-xT;U2CSK&NKEjJv`-PC;0vKWzbrXx%t`BybJop*Epz#;Rr0BrUW(;~Q;T6p( zf5Dx%;-=u5L~S=^J{i_tEOkdL%{Aon%j%6 zpKeV!ajX)XkJ(T=MBcTnaxD{dxV5aR;JDlg<-HZ6(SLeAR7<0V zw!IF7RAZ~EnPQ|q{qZ3C?0iA=n1@CxESHW(6ME9Co-4Wb@4f&}_)5Mxv8-lIt9tV9hdK>=s-1 z%*%U_vkHYi+6V=R@VQ8s2CX!A2X$-=YSR0^x`3t-8Ni`VyAybGxqGcqG4SP>iLcvA z%H-Au#Zj$#Fv4;rn_Zp0`zQRxWHdL53IX{_f@ZRsG|%W<*cr@u+U%0XcU{&G>m|#d zJLU=-=032he=zts^q&Q%!0*mqeBq5@osD~SFbMZL_#`+PxI)ko0o>dQw2Bv37wwoK zx5k4BW;}W%ozvtvAoV8{vSP=0>Ve%9(TJ*=nBckw`(3}(Ng9J72VeM~{(N+*`1h!s z44Mxhn{zn4JaE@FR!RveE7S9ss`?l?8@zg2<(Lk>e=Q9KEesc;VqEw`ZS7@N=ce<7 z56QP5q(;}ip~rZICw`5u!~0TR|6rg91rPV{)0tzR0BvC6#&2%VACUZpx21hj&%pDo zg#8-?1ZtC~uX-#a*>v&7nk_BO#0+RNZZ%~y3}9jUBlt0A6DDp~O^yV|`CUE*X;9f8 zh*;5b{dllHsP4BJrKL&#@9ZVS(2H^&A_-U1rRlVm{>=rLsi91z28Ew0kpUbR^rf|) z0+`A0j;>h)g&Y3Pw+`q93lIR07b**%kXj)m4sTDg4`gN?(8e1`zdd<>Z#tux9i&?nH6nEfj4e9^w^1dMeDfLZAXuGut51|zwu)m4h;^L$* za#zaq_4l>DZBPkBM8nw(?9Im$-_Y0598={S4sX~lK1KR>PMn#@hu9OwJM7&6+3eIJ zTUSt*G$5PG@|v#cqrEGosKjLF`c%f&a4t1rv+_^9&cR5V<77DaXl;pWxNXrRnpQ-w zMNt=AP66nsRTGxgFO12T*ie=?1OJ1i=I^1K>(JxwhS>At*17}B4-I;WiEX+sF)+lA 
zV<5pYkPaEB@tyTe1B>X4Kl{N6=3xma*Ru?!Fd+bqvfXw}JJW;;NFo0@_DLHKsHQk( z_{$BGvC(u}xslAc|C|#F{9v$?K|(Pz21br-DBkuw=F}X@&K@ew68dD+!Ph~-%1;|m zcLYiP2s}D+%Py|J3K^gS@J2kIdB$TTmt;Y>TvW)Kh3F8NBInsQvpE z^K>XW4-Rf>mX*0%@)YX}{sCk)7WH&hRN6}eVH%7G$kUIBpU6JE@E0_^6+c37R016f z-gOiNyCO8Zd8X?Il=9by!m+}ir5F+Y=0g>1|x()#7Yo-0z}|m%Y_YN=vY|TVkA868=U$ZEieF=?-A1~ z`APzm`zgqstuyi`qm;3&u_IGQ*{MWb6hF%@_Ge&aHJG!wo2**%cyhGH6s6ocQp(E zKCzcEu5tSy1vHzBv^pxvN}Qc-EL{D33T1;cflnG^oJ{$$NyaeeUVzTGzO?y2ZP~1W z);+-29nJT}Z4P0lI^Foa0P<0lrh~F5W~KTd+$KqB?h^9{RdM&*MFQ^G3!EnmQHuS3;(MR zmjThXC$Mt`a$$uXp&XTQvXIJE8nqMjki{5u#7w|%FO(KiMMZWyB&x|Kk-hd_0q36E13Y4}3oJvV9hDq~RD*Vu+x#&^wWyr@0*<`5Y0RW1E@9^xFP zvpvz$6?{PD;rN$4cQ`~)quYGOJM}7+yj<#w4QlKoDIJ9Ik`a?6O`1kKQJ~e`1xAKu zRz8;I`QeKFk9)r02bynxKfC!6BzCJzwYl>BosuSUEzPpi7V6hN%*?5L!80=4$aN-K zA3uesMl+cdoV@~?ptQ56#Ct^qQ@^w;Hv5f=9Y1#s(D)G9jHq~WEtkp z@c?{rupErC7Tf<{Q0M*Kh1%WYtpJ!sd6z{y^BL2+h#G-7<6ykaIwh*TL%XHK1}e|! zZypi-cY9Tg)nMw0Pzz8)T}*&}TNHJ|UAgkv-9Eu?@7I;DUo&G|AW0X-{q{Rap)Z&1 zNx<}*=qIV~|57)x(D)(q!$IU__^tuhFZ}wbHOEOmtwIAN+XtEm$4jTevaV%6d2qf!wuN4MeaeGuE8Q-Ocku&%tcCLsVJ4 z%yqW^>vmf+@KwO2VTj;g&%!*+`K%EWWx9fT?eB1H$iQBY1UwP}%sWzqNz&r#KRt*3 zbHf>6k*n}8BD~1aniAN!sqFEKSF;X&djv%}w8ri<-xUnB(BD-9V0gg6xEu%HWj6NX z;4cTnuuvhIp7H+x2uy(ig9*<) zAAlgomi<}AgKG3AGt8^T-3&hO+sFw)_0NG1TNfhqyIuoy-zpC-?PN*%IBzkMG_^3#buh zjBA*;UiOHdgj=>2zO^|a3~PJg)gFB~PuNLg8%EB$X3}~2yJD^X!P7#I|LMU5z>hBX zjIMd9FFe6uron<2hmui-0BCjeJI#`-GWf9)qf~_-uErC7?xxz73y<|cKX_GLa1+D_d4>Va`j~+>I%KiHb`%(%QXw)K%GMwU2S(XTK4es(u>z!Rq~g zgn%B9qz|sY1a|B3clCoVeJpz!vq@0UT!) 
zI926W_(x8e*ECNux4vfVp0mquI>u%;n5ePiE3M*&0h-);gn+o)>!pFEwz_PR1{pLeL8GPS7Hcl5rb>r@4oGc79#_o4}S zqn7_-XCT7iFuuT?RsaiwA{n|vJNbEf)Jehbd_zUl0Gep|SX0ry%ns&f_W|ty{}AqH zB>HqTH)G0gcl{M_yg&P8+zMW9b$K~`Kog2ei+TpJ^&-`JF1TbffV||`91b7axU+HW zw#tTNx`$4U@kIWgHD;6qM;orPBFOAe1q*bNBYV$}x(?6YlbUT1%9P#g{{Z4?ZNt+g zFEFo;*i_ouD#%>aQxSBKg8@WmvRUyq`m4lo(7PL{QshQ_&F#*KIE4U%i1f~m7(dwn zTUGtlBuF+?k~;7)S`#5A++U@APxJR@Z_8k-cYQ9vi9r!6!HDhGrFB4iw|ZS8Y}kor z7vT2eeuCIbAK3N#YVHz9`YyNC<%{Ll!sUw9$G7)^i8cEJSsTP;;~`)%0YMRZo?tKc zGW8{5N~$x&?_kuH!FPUCIP zpj`Ka;{CMbd}FiDoz~ykR~{nj%(t{wzIMDO@`Xo0=2p9wgPe~Pn|r;dDhu-Pq;$65 zamVIycT&Nd`RQj)2sP~aOJ(|u2aT|7#wjL0tO1&YWFVm9@Go^?9FL^Ii1?!x5{@Lz zjqYLmzPMr2{yUmqA7hv~e8$~-G_3C+PaDZ{KlUd|`dd614s+N0d&JC~f-tKPeQ!{~ z|JPke2%77Qm|ETIpHG3$K}nVM4P>1>0^zsVFPmRSY-T91Z&g4j(#itO=q`=4BTMP9 z8o;RCN5n|)Z68*6C9^ok*Pl^B?KZVE`q@SWKOX%HFU-Sk@Moem?*nxS7NBp8%}M5* zpFBKrk-|BY!akB@P=o9-n64QFoz(tb^{A?Qz^Y}W`n+22X>Ix^$m#Pg-CNo|T3w=s zRJ~K0KM>v3E>wun!A0v0g4GXlm-o+|EiZkvkB-yM`YJp6pzai5Owk7hr`-Qm>#z?P zZ;?}*W;_*R1!N7q2?s1Zs+rK;X5WP8Ie=%*@FX@Zd=u`l#s^lzn_0S5^9rE)$+59b zbJ2o#?fiejE{?C+@>QL|_AmdI^3y0_jMC!8!9Q6vHj{H;fTJOCe&QD zuN3(=08h~XWjCvGyEAGm9zgjt2L-i1N~Ff@fr=WAs$K+OoalWQ??Nno07@+8YQ~%K z&nbb|cjLpXk$H}QB8Fm;#2IJ_xLSMdbFx*6{3`CEnaN~qoW6Q}v?!hTpPys+^d?eq z2SyUix(RD+v}iXKvKwJr^(eFth@e(J7(j73NRf_@|A*CR)?nvabN5FOvx6zZRUU5Z zv_n(z4-z|X1ZI|CpWf7p2nFM@@fPc6(o0MCa?jNM8Q2pa{|Ph!Js4H~k>V)hv|@E2 zbe0oS>+*iHD~&sSjP}1Mx*#*~qj1<+3#I)D@Brt6_7sO%$cyoRE8$-tX{cL8fm3V zP*NJ{lx~o25QdKb9)0xjcm3b@^ZVuT6W73XdCs1_*S+qw)_smR-h141Qh6MqK66jY zX!YWj`H#=`#(tf1M>JeJvHEV9VDW@Qa*Xj)ICgWd7P{&Kr;0%13iF_2H=r?&%4@_W zI({*Vb;RpI{p~r~(SZX;51}!T@^Qb?7iN0a-76gy4tM$#uM-X=Zg)rd&qz_bHB#_7 z1m+I=a4h&Y>pEoo;-et~$Ns~sgONA0~0`o#;y!S=jME${kfXrZ27 zUFGrtzcx32tq$Fo}R*P0z1Nk9p;#|?g=XCMOkb&5`7pxbxthm z`}a_;WeA7yCl;EX^VQ@0=3;h($h+kuPXVt9B3Hr1ZhG?Lfh=mX7Jf+{befbFCZaPp z<$)L3gA(=}5W&>h8V0{ORm|$LaE9U^`z3RqkZgA$!MkUFWGc(Ctj-xkDAG`2*y&GW z)TN>{Q&(&+$w5zC$?w?nmn4h4=T?OlN^lV@2crX+Uh{83qhx1$K|Q#_C3A;7zr?<| 
zu!>?h)Z(g=82a6-89}Bb;lJ=7h~iZ(&z1xP61Lqg(96-|@l^y!)3Y(06vb5ZeFohm z(5j#54}(|8{pKa!Xrv$uH!pMVhbUPaFKW^%Nf{mJtl&#QGyWS}7+8?*VQ#1)nYttE zQDRYjZf;na*7b)kSejRV>R50Noa_tz(6i+_$ud6aO&1*2Te9Evyv)i}MDQpR>ofXh z)?NJAV`Gy3L_cN)kqD+F2yzxLI8!aP5DwA=`wcbcBYh0bjay2rdab!H7`QTU11`_l zf!JGYu7>&&W45xt%t~+KHACBviz`e>M&daZ8v&izY^ScyTwYGd<3BNOyQg^H@-X5wWnH_p@y z3_5kWZ+oG>B)eaGd1<(6Z6;iieYLSrseaE;b3|q$|2Id^41v!|hvPF_Jq>j0wI&V^ zI~w0(yx%!{ODo^YN_O;Te+TXPYqU6j`?;Bg8eWMV6$jT+*qm$?FG6unXO8NRe-LoX_a1A#YOY(E(GNPw`!H z6M>)_nr)a*l|26^Tm+I1wP35gwe50Fg8+kqCg zm=2e5YK(8TFQ1R@Q8sCppryaR?e$LZ;m^7xcnpLvy$CJyrD|GwZ!2rron~q8p7BGE z)c5!=58W{|tkJ)(bA;Art#)~OaJ9Mo7>Qv1-CY~%ASJ_s*dV_QI3YHM_?P9Bz_lkq zhi&KB6Nx_I6tKwPx>X&mQW_(Ho*>|718qxQAnx}!8E1t$<43n5=TeNb1+DX0v$|8U zCIoBx z>5ZrJdU@R%RxcNVBmx#rC9pnRPVS|jXvILu=bh;+{*jBr_395(_Yb3B7$zty;|=X< zYNflgp8ls!jL(}Kz1JnLJuF`4lvzt+Kzk)W+m%eq7w~(LkV|c+fJ8A;$`l1)I!1aQ z*Hg;eIYdBe&fNf|hq<{bbmxPZ0kLi=VZ(4N;P*HHT7o-nK9uNO(pV==Uk>>RV@W`$ z?GX0k9zDlx1WFM*gv7v$`igl9rStWD`5s2U9cPj7khF{Z1y>so6C1iKHRZT#=a?7({u6^iI}( z?K~fGLoyzpKl8*EWpNOX2S^96#r8){rEPpaph_`c2nDQ9prNJs8088uP}>o)s8u+j zJ*kI}{5@(|DxPR|%?_`+S1JiA|GpL$b->W?EiSg-0MTJG!6?UczwoO0EaAZ^on}&& zNI`l#d?zc(3dFVge%)N-;$?Q3?@0{4>e>Y6TfQrPO#hVV`U-D+5+`Tmv751z_!6{* zdKNmy5mparMA>zAC*GAW3e1i}tuE;pBpyb+|9s~Zx9HmMEBK%2k-~d;@!}l}U4F*V zGf`hfS|0_Ps@>2J=;7PPRQYQ<%`ru{C#Gd`4Vh*VmVZh_K2{WcrXo5CAU=q<-QYGm z<3`)noi79Eq9S@+v%0PHhOazh2b(^DpxNF#A*X-;Qvv|K?*c;_8C793m?j29T^XM- zjok3gvKXX~7MjfDKW|xC5B17eoi7%f_sF_WxekwZ9O4D;A&(9hN94UGk24Y%qDj=8 zb&Ty90=Hv}QF+byH&7r^5}T42-@l(>C2My3$M|oR-W%;L6DTh4i+{W)ORay2-8RRV z-EY*EY_r#g+B(}Fgi=bb8xJg`Jx&k4*=&$^uq@Np%}HNDf8S%~Bly~A=kw1)#{~tQ zW7K?P2D4enr@n53TWlRJhhi1VR^ErRNh(uGkyB8+m{srj`+}Vs@!lC6Q${89zmXb= zx(W$Ml#}fw1stZ89cD`QNL1~}00M1FR@y@)1(8=RjOu6f#1ZjCaSdhNROjO}4Y~P} zXSwA~xrLHetM;Xf3BRPag-Gux?8HN91Oj$GLCdI{NFs4g7hHw)cGpH-E#V}rq~NPV z8U^TLp5gd!2^VyNm-^BTK(qgE*Ft(NH4u_kO0QV2YF>8rO)+3DHbW1XTy}**-RF#n zhk(_2iVBKgaDf)N$LOG>+kM)Gye1nZmj96Iiqkbk;GKu-P*HEY9CtAns4 
zqY&99WKRZlS44s2MdZt#`1)cz7KIvZ2(je%;W$-WC%ieaFd#cy#%k_CX;}ELl`(nk zJuNIIiZV14vft~4J2Zn1!Yhkr-UnuTH4Wm+Qd+#Uud9~yifPQ`1Tr1u)^=&as_;Hv z)c&0%GwcAk{Eo+>76=bqb7=9m8w&}6*~)mnS%g-V9NXg?_064%e33Wu>}VMfa%RtQ zZ8#5Njo4xK0mF$O>#tK8Cyth>UvzPDt1Im4*obA>y$DZ)yb5&FdmAkc<6YdUsCAK8 z?`=}~nqw%q7{!zq;5E{}s}U>$$*kwUE)U*_M4)A6X5b+OmqR6xJOlmV-rQjIye3Or zT*ZOMlHZfF?`(?08A4G~f_>&Ch8rcgX5wb&0Y90e5OXRo4FuBVkpHcOmA8LSq5jzl zF9TYYh#|W+CU*wz$I_kvb#($=^D;`Z#@yUjB{P5TkarHC=TSbnVuu3G3=W<@Eztl z*+7b934#`=N!>2lCV@Z1_S`%M?wSjXJ0esmGOVSY91V{K7<|`O18bHcFwV?cyn`9`3w=fD{1%T#G`+l;Af`+}YFsBv#U! zvoCg>Z3T1>ClhVp6<*i560p!1c6iJz_0Pb9B$N~P{IrXRSBm$Z3ic3tn*QjGdneWD9*Kv ztT?*)&`pz!GLgmbPm>G|*B=iC1Z4$PeYkEr($1L?gLqm=7OjzO#8W|1n<*0>s|O4h zWE+_B3d;}kswj-y*uWr|D~GeL&SgwL6; zd+w6=j|?*<^|!Hb2|Z`GWBPq93m-s*dQrLY?lvZ?A<#Rjy04U)fYL}Yyq1$R$6-&P8+LFnde zU3R&etv?MN*I4A-bW$Yny6pW{%oc4w&J04nJuVsGmZ`asZ_o+Z&1##0~$O1D*)`77Pi_M_Xd`X6_n*d8poE38rI6Jio`r$D!H7-6+N9HvT zQSIigk$1BrZaC;B&bhkLf(gGxsH#;@m`7SkB}xe};2Xw1#S`cge4-6LN2<*eZRFU> zMlg+4CDIait`l{C$l}zl4l=|(IbN0mJ-hKdXUu;zYRj_ zCY9cFbpAB-@CsX^MvrPmUW}yfC*4*uKXS{ZMmPruh#lsskHHSyeT)gsO;- zi+Mi2Qd*0P-lpc2*g+s@yWr=2tdoTvs(6};bxN{Qe6j|o4#VZeFe}gV&LGR6Uui*biLg54YHcHyH^4EPC+qv% zv5)>SE@z4r*q+l~5R(DVBOrJ$ns&wCF=A8^=(J*1r~%Cqa9Wq{#7F$xbqbcK16r2Y zcOyUqRNNnuIBQ!$w6f)|RY8}YN%1nI5dQs@C82VYyR|}uCXLb3$uGe?FUm=5vS9xDl@RkFqg46~VYV#nAfy0nd%Vyb#A=hcc% z1JiQlOO*(F7iM1sYD5$^Q6tyGVP*f8_X45 z496-wr#{SF_xVTy!}pQ7@_3QfaHw0RKdTl)RiC7WA0JG{NJHp1*J$=cf8K1S8vI7w zc<4!Iyp1RM5%$4m01b`IiQFY><#XMA$6NSmi7mleXdgR^e1-%mKSpQO3oUkO28moa zK0C9Vk5clt1z3)x14ypJrH3S19Bd8GkOz(t7=)mafQ*gfANoxAyjE@NqB5MYGwbHl z&3(9iN+qL8Vqy?6mbrB6a z8FKxxn&`iVb}F0@=_yEuwC8|p9FT-VJ22j;BwLmQHim2L%}tlg!$k?*rqTACw>+E^ zdJ3nf%2Ak^;i`->MmSftk6;@umBq9)iUhY+)t;cC{@P?9Z7WSklz=aLnC1pjKO~|T zC^AolUR~~GEoBBxYY@!%ogQJeO)Ck80WGHk7-BL4G7eCk&Q~_U#W-F5xA6WN6S2Ps z0P>NJVh@rLc_Q$gzyP<^5Oh!Art17u{z02nhr24GFK-=Y@g&7Vc zt~P7b8(vE4a?UjYn=o|a1~PFJ3m-L&(5=aUZ<^O2IPpXgLnAczdztuwUcszuAzs% zkuc6$DvX`Jyjr9y+?MUN_?OxSnrsq~qL0N!3U_ZAQxkdUKvV^cOg2<2V7(?3UMno_ 
z=#Co?+XcKhJLTn3<+@RKHVb(#=ar} z^fnzW-`K#3WiS|*(oJzpy3=e-oP!qUJs&GU@Ln#Xh*AkZJNuOduQ2yO*G)v4GF(lq zk)voqWA&`+m7wmwCn60=a9OV-8IQ^=6~;cls^&6Si75gRb_1nw?SPz03Xn;9Y@S4FdQE*b|LQ3fRznt$ZL~R4(L>|$eWyf- z=|<7V+=&~Q4;zuO8X|W(4O702$~}^;sj%1Ep6F;DQBHjVY~LyM%xtu@zM6U)%jxCD zx5pu3F|LF1_|LIbLQYg%cf&loKptX3oRy8MD`tdKP!<<&nJ6EV5&k$he5ZpS$OsKy zOE?(4qPVJR_+&BlAolnU;6ZXJ%0br{T#_;W2X((T;=s~l<>a++a6S9zmuc2WUbK=r z%KH+5Sc)+YqwXk1^1NVlvvE1upU+(U0mIq*8|{XidW2*B;rrX3)=RdUYH+gXdLd(W znOx$S3)jZYZ$i(l|D`i3vnI$SYKf-zHgc}R@DY6E-at^!adu)O-77MkvYd=eUWTS~ zw^?f!l!hkf#Ga8%>afC6ijW3z4}y&kc}BlRZ&7lH51bO8<3O!K!32$3$ryT(Anro2 z_i3Nl4t*6Z2(9M6@EX+Cxe3EgL+iz$BenIC$UhkiF4Cqnl~{j6kU19;>^bz=usfD! z4>Ra5Z~^^-3^>0yMho|T>H{UnnblSQjVIDO-}kWfC!e4}IusKP-~aFs%%`lRSxX`BIK}&3f?E3H9as<3slSjQ3<3Qwq%^H6-b|G#|3jYe^nxfP1aEwOPq3M zPjh5v)|g@E7GrDN6S`vSCLr?_9{^iMmz+)3yU1(b@3Go7qTQDS`A)0{4S2L9NbPt{ zHG2-Vxu+Io7mw3NT@R9%i#6A99tk;Q9IZreD6}rlwNUiCac&or&$URtQDP4l2;f4(L^$Vh3kLU`706uxCLjBMoPUu&WcWioVbIXiUc7P8I8{j12H2% z=hki&W@!~^^h~G3F&k}lzlBx_OPc$FdzA66ky{`>UrRaU8v4~}WH8=(gee-$f&Ct+ zjP);ZR_Y(Sb`9#@?oRXfR@XP85e=I&UBaE={)ofpTiAL`k`OM$frZD8S;NZ$N&`T4ewTB*N8r-wDQOLm(*}ko zcq>*pAOzSJ;|G%61Qrh1V8Z^JT=k%lyt2_1u;nKg|KEPHO0oaj6`El2&U( z9G8f3kpBWsWw;T@z%*KeZhzoIK9J!>V}{WPAa45Y5ZDDc`NzkcnA7B z1dAjgc2UTyzDAaUgj_KEsYa$X=*aM)C;JOKp@F zlU8%0U%+D?Qi^Y7yP&04P~{N0qaB=mEzYx4z>2of$mq>WRHKX6i+9wZ-ekW>4ExC|h&Ihe(-ojuSRdVrJ4C&!u>^oSov9S@ZYqmDidiQ%YDuLC>$E zLd@seUq?X}ZzVTH7x+K8k(mcTc)-zAkOR5=r0lQ{Px&*@{UyPefl6q%u3?$dy0nw? 
z;|2K?fF_tyre&6i)=6m$L!P%dB-eN^QOw6WSZAq};}V`*mjK!*Rxt!fQ(CKuX0Y7;HGAF<>eVk|hsM>IaQJSz~)lGk37O`*ge) z_V<^tBMiQTA6JliEtsO1m%Ne`pG^e4ODFM&5?HFKEwX7?CyWEkJKL4elFIf$ufcB9 zR~I9Wb{Z-4m?Ts9lUeiuhkar!chdt;=HAs(d>rU`RZ{H0o6vDnYmqnI`zjIcf-js3 zzG`KpY<_qGvNqUdwNHFEtK`1`GiQpc(}Xk#a)2E8ji>d(9~yXy7Is2>C!n~)40aHH zn26unoUrGu=9BaFNkVHJ`{)n5)ZLvrh8T{v0Jh-g$W8RCI=%p7BgXo-c(30BTWOXR z!xts3$Hqa=|6C02ia%ZB46}NQCL7pAvDR33DGbIIGv0n^jeM~}Ur~OJn_EXjwc_Wo zG!~px%b$CyrGo7SGAfP){RhhDeSDh^*R7y})I?I@0vSe2!>YHgr0}eQb$X%qrd|e; z5x!Z~lEWdF%1bmfiXF9$nrvI>;nf+E`SDGuA&JZ}zFdrRut5(Q$_F$1<(!#!cO%mYUuIu7jzN^f0L*yOh8&CM_pXE7)+E-(RG> z$?F-jFJ-dOV|hmukpKDmp_t-UV5!{=T=Ao4dxu7rA!9ZLErIElwfT*=4X(6Hy}f2d zPJYJ};*iN*N*VF&!;!-uc!K+^-a@ibeP}px@~i$f2^VVXE_-qi52<4PZqE#CZ;v4{ z|L&0unSX!0(UK<2Y3MUD;QBDACs~A{q=;yGAR>PB70&H7VW(@tvs6KP4{g^5UYh(+ z!FmtvzOZYFe|3>EQJU4R{(7+qTiWAUd||&C?Q^QQp8h4Bk-9_l=j}{d~eVCSHkyVqolfY(ahWJ$u7nWsNSgv7=F{PIq@Aqm4(_3j#NyG zccU1`7{GtW2a!Egz1MZ*g9d&n&MqFP(A&pNhAH@r)zu0Q@k^%$_fi9cz%WQEPe!JG z64KWSiA8OHk5l3=Qf9a?b9tQ$WDks4NFS`=b3$c8uQPUZymlQ4g5A7nG}6oC#Q`~0 zRJn_(>fLsegCF(&Ku)Y=;)sEW_fw& z`DOj!(XnM_D{+(;z>tD$`*>|z6Yo(pB=a()7gHTfzQ|k!e|G0yJWklr7X0LR+Q)B09!RXt2ZsLY8bL0a*V*Ug0I&)bH$_Xk)lt*Gh^_?| zh#b(bgnYW4<`*n zyt}B*pt{|kjfL-OsxcuB4kgJb7$-~7K}l?=FsI`~5vp^VR#LlTI;p zs68d>s>@G${WiAwMn19*hP{53F3TlcpIv^xm59%iTeskU$QjHFsnpDTUhYEH0EHSj zi}l+04G=56jm;;B?X}E5pupmCLQ)F-e3S_8PS3I>v$P{n;#@NOfCAR{nbB0uQaOh; zQtOfLp!o&G!5i{^r(u~Z7A{;JC3GoCHVUN5GQA@{FeueGA310w-FB$h^lL4F&i+;x z*{kb!l+O7cQ|QYQ)o=uP7Ij^&*Qu$R<>@PzDhipEadalzv=tJ%fM^ciZH%!IC}v5D zmvEInn0*41S9em5bxY5MM2jIMS$L}{?hyOK@Fx?cq<$V9)GPzT{kq1zl3o0q?6wfJcg;jwq>AyMU zd>wevjR$6>rXud9cwB;pJUu}Ez^yxh3g#(=2%DVJgM@<5WiwWGtxel>;gh`2?M|Vd zjxj}nrBC8b**o!NRZ1DPO5ZhW&^0{JuV~h=ci4eZ^KZt$4ppHR=x827UHSg_A;aL! 
zTj9O~uA7LN-GA#PaxKD<3U5Z{chaBB1ynCk@x$3!(0Rao!vhGt!Q_*Yk;rzG95gZy z#Dtwj5)A=+QsC(dl2ekGY{L4!VZ5alsDb^%-!;d~q_IV@Wce4_+`$*jQ=eT#+g@B9KN{X+_KO{( zYNIQ9`(hN)=XAJ>-@n_X3f&m$9nybbo+Ych8C_VKZII{pC6h#DFlUl#g1_6c=&!8A+UO+&Pwt?SVRs)^%Qor&2I%(8%Qo)p0{BndU3)ZJqT?6yv?hN za3rOw(KE2E8(t=LRSYoojZ__Z`cuRDhf$CnU2aZIL?n%9W9>L^i@8NWTZ5Vpjv@yO z-i{WqK|be5(FFK2g^Ni>-OoNJnkCHHgkJexcBO_j*!%ALyL5$cDc`$f#4rrsPcQwT z0+8j|*L zH$n=7d)G%OrHV><9NP;#VAs!~92iV2?sQ}7oG&}~#NQgFtVtAbqSnxO3|@H1?KxK& z_fC{#;H1~t_@HK_sMgPRe~hzvCoLu9B3zH#dDc)&to&%%i>4IMwt1o1lH~2veXP7J zre#@#!{m;S@}2GBO>VDCX~#C5i-Wqa8y(Scdcu^YAEH+Z3`WA#n;iRTds+==F1Cih zck5O$S|f~IcOOIfI-hpsZI?ZwrR&7$5_T%Ah3v>>xlV0AlB@rrOs%vI8`mrQMv(i} zr7sEo=#(`}mGe>)0S38axu5As6ggaqYbxb)#~K6csM_ToCLLLaq7l ztR78S1T|4jAHw&`H-jrZAdic;nYqDZ{-`9JvrpX@R=2J`7Q)fBkM0aO*a$oBy|lm= z-$|o7-|rn}d~Sc>P;>r8lts_0dve9PcaH01!g;WvqHb|`YtZssWKb+wFFA9orR4`Y z0|quRSZfqiGz{!7ny7yBZshw2SnGGUPq%ze=6J6ll*eblJky&MwOgLryTWrew*-qbB31}g;U&@ z@th8R^E2YqrFhH6_6KTR9mb>U0y0ISp1SlkNd^o}5i^wM))U6X#^y3F2HhM7*^IR+ zv){kV%%cp;*S8wbv*Fd8VblujIH@c0RIJHgfQlDZJHD{~bSok^cDs}?c>7FM=}^z@ zjqSlw^p@qYl7aqdC4P9u@k;F6r!#8PD7J0+TN)v@E$_cvfA;jo_p?f2cnqF;$IFJf z>st}iEsBV#AXAwBS!t{B+4uf9aj&apl$?*mJhbY99M_GH_Uaya1Rb84ba`w{YYCk> zgkH|C3iB%wihVu_`yz&Q(9w5PDfhT|9i97RL3+w~caeMTyB5|o&t#zo$Bb=A*4@1` z&1*QwRsOl$KF;ihcJ3o4wLe$>8*W!7ca1|%xDMh+oIItm>zq`Ldc~LPC65GoN4;Q6 z;trb4fdu_^rj`o5$>AUBPS>~6UzeJ%xzTlIOmHVTfQ>xzbtU;Uz1)Lw$!m4)q=3_7 zuV}6MvSmhyt~6x1+W9iN)jnvCWN(k#edl5S%nLXC?b_?EiRH};4lf{c?I&7xm3Jz| zTXeBL3Jc0Q=X95C_D_^C%O0MqLHjyfG&!WSZ^R}?4Y-WB52o2ngbP3FnYlC+Q`8dg zD46NIJUiqTL)17IKDqts*x<;0-+8O{WRI=)%q_GtMb*o1=wUdbb*k==lH&1EJA|3v zYU4}KIV|RKk%74*tS?J>7_}r>9ICvwJlzuZachqAj_rCG)kaO8na1*xFkgx#;UF)Ao(e+-eA1?Pj-5#9LNh&T|Vr7tWlK`0R@NQN(dPkyggGvjB11 zMC?wLx$U7+pS``kH5q$dA=Qw)>9n{}!ohZd;lJD7vkL=1T*55i#0mM;%ueA%N#?)v z4P9J}tEtq2lz@k|Aynt=g^M0BJ)Az*lr20qIC}*B9+P|)a>9D>L&5`0QYk-@zzQG! 
zx-&o|cj@O=6z;|rR7*ZDt^Bnj)NSTs_fZaE36)0$D$=2U#KdaQMJUgVf{SXgCCv&@xRb(Jqnp=Rc>)!Q|iat)sQHse~#dV~xvtgf^It%l3i zQ{8hxDKEz=1=ien);GT|UR|-{IGJ!7#(UK9wC%@MhrMq}`C?2QJEt>cca+b^TH}vy zb&Er!q1#P$nYE_BPdq9bI}R0{u7$h?@{^-WO4f+&7|DEn!*5e%qHN{7vAxSP!DAiv zP}WWnj81P2j+^l@a$h=`+c@jU-fHTvaMVyoRJVTV4C`L>jh?E`s4EtcB|1}ZUjMPH zX>7l7taU4kh|}Pe_Avq>wefU+$#!=3=(-n~yt3%H7=^e1rQgJX%w?I_RGTJ;_E;@( zhP`%ACOh}2qZ?F5Gn{@au%&-3Vtm!mPt+?}l9u z^>W)7Jr%{4uh`PIl%<245+Z=74@=8mm_QHQY%spuSwKTW*kFKEs|3#C>V*T3FURpNU zwJ)UcZ7vOKe5&i7K5YK>K*7__3=z2e1FhqS{#T9CQ^QAMVOrCQtBo&@se{~lC zwt`-S-M_%`r*Mv6;Swwxiv9F|_|*eED!PXw%yfU9n1BAtrwqW3HR2lk|KV4IXjrX- z2yD4Om;C34|8DEgs$&!X_ig\n", - "table {float:center; width:50%}\n", - "th {float:center}\n", - "tr {float:center}\n", - "\n", - "\n", - "| $i$ | $Z_i$ | $V_i(0)$ | $V_i(1)$ | $Y_i(0,0)$ | $Y_i(1,0)$ | $Y_i(0,1)$ | $Y_i(1,1)$ |\n", - "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", - "| 1 | 1 | ? | 1 | ? | ? | ? | 0 |\n", - "| 2 | 0 | 1 | ? | ? | 1 | ? | ? |\n", - "| 3 | 0 | 0 | ? | 1 | ? | ? | ? |\n", - "| 4 | 1 | ? | 0 | ? | ? | 0 | ? 
|" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Likewise, with this notation we can formally define the principal strata:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "| $V_i(0)$ | $V_i(1)$ | $S_i$ |\n", - "| :---: | :---: | :---: |\n", - "| 0 | 0 | Never Taker ($n$) |\n", - "| 1 | 1 | Always Taker ($a$) |\n", - "| 0 | 1 | Complier ($c$) |\n", - "| 1 | 0 | Defier ($d$) |" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Estimands and Identification\n", - "\n", - "Let $\\pi_s(x)$ denote the conditional (on $x$) probability that an individual belongs to principal stratum $s$:\n", - "\n", - "\\begin{equation}\n", - "\\pi_s(x)=\\operatorname{Pr}(S=s \\mid X=x),\n", - "\\end{equation}\n", - "\n", - "and let $\\gamma_s^{v z}(x)$ denote the potential outcome probability for given values $v$ and $z$:\n", - "\n", - "\\begin{equation}\n", - "\\gamma_s^{v z}(x)=\\operatorname{Pr}(Y(v, z)=1 \\mid S=s, X=x)\n", - "\\end{equation}\n", - "\n", - "Various estimands of interest may be expressed in terms of the functions $\\gamma_c^{vz}(x)$. In particular, the complier conditional average treatment effect $$\\gamma_c^{1,z}(x) - \\gamma_c^{0,z}(x)$$ is the ultimate goal (for either $z=0$ or $z=1$). Under an exclusion restriction, we would have $\\gamma_s^{vz}(x) = \\gamma_s^{v}(x)$ and the reminder status $z$ itself would not matter. In that case, we can estimate $$\\gamma_c^{1,z}(x) - \\gamma_c^{0,z}$$ and $$\\gamma_c^{1,1}(x) - \\gamma_c^{0,0}(x).$$ This latter quantity is called the complier intent-to-treat effect, or $ITT_c$, and it can be partially identify even if the exclusion restriction is violated, as follows. 
\n", - "\n", - "The left-hand side of the following system of equations are all estimable quantities that can be learned from observable data, while the right hand side expressions involve the unknown functions of interest, $\\gamma_s^{vz}(x)$:\n", - "\n", - "\\begin{equation}\n", - "\\begin{aligned}\n", - "p_{1 \\mid 00}(x) = \\operatorname{Pr}(Y=1 \\mid V=0, Z=0, X=x)=\\frac{\\pi_c(x)}{\\pi_c(x)+\\pi_n(x)} \\gamma_c^{00}(x)+\\frac{\\pi_n(x)}{\\pi_c(x)+\\pi_n(x)} \\gamma_n^{00}(x) \\\\\n", - "p_{1 \\mid 11}(x) =\\operatorname{Pr}(Y=1 \\mid V=1, Z=1, X=x)=\\frac{\\pi_c(x)}{\\pi_c(x)+\\pi_a(x)} \\gamma_c^{11}(x)+\\frac{\\pi_a(x)}{\\pi_c(x)+\\pi_a(x)} \\gamma_a^{11}(x) \\\\\n", - "p_{1 \\mid 01}(x) =\\operatorname{Pr}(Y=1 \\mid V=0, Z=1, X=x)=\\frac{\\pi_d(x)}{\\pi_d(x)+\\pi_n(x)} \\gamma_d^{01}(x)+\\frac{\\pi_n(x)}{\\pi_d(x)+\\pi_n(x)} \\gamma_n^{01}(x) \\\\\n", - "p_{1 \\mid 10}(x) =\\operatorname{Pr}(Y=1 \\mid V=1, Z=0, X=x)=\\frac{\\pi_d(x)}{\\pi_d(x)+\\pi_a(x)} \\gamma_d^{10}(x)+\\frac{\\pi_a(x)}{\\pi_d(x)+\\pi_a(x)} \\gamma_a^{10}(x)\n", - "\\end{aligned}\n", - "\\end{equation}\n", - "\n", - "Furthermore, we have\n", - "\n", - "\\begin{equation}\n", - "\\begin{aligned}\n", - "\\operatorname{Pr}(V=1 \\mid Z=0, X=x)&=\\pi_a(x)+\\pi_d(x)\\\\\n", - "\\operatorname{Pr}(V=1 \\mid Z=1, X=x)&=\\pi_a(x)+\\pi_c(x)\n", - "\\end{aligned}\n", - "\\end{equation}\n", - "\n", - "Under the monotonicy assumption, $\\pi_d(x) = 0$ and these expressions simplify somewhat.\n", - "\n", - "\\begin{equation}\n", - "\\begin{aligned}\n", - "p_{1 \\mid 00}(x)&=\\frac{\\pi_c(x)}{\\pi_c(x)+\\pi_n(x)} \\gamma_c^{00}(x)+\\frac{\\pi_n(x)}{\\pi_c(x)+\\pi_n(x)} \\gamma_n^{00}(x) \\\\\n", - "p_{1 \\mid 11}(x)&=\\frac{\\pi_c(x)}{\\pi_c(x)+\\pi_a(x)} \\gamma_c^{11}(x)+\\frac{\\pi_a(x)}{\\pi_c(x)+\\pi_a(x)} \\gamma_a^{11}(x) \\\\\n", - "p_{1 \\mid 01}(x)&=\\gamma_n^{01}(x) \\\\\n", - "p_{1 \\mid 10}(x)&=\\gamma_a^{10}(x)\n", - "\\end{aligned}\n", - "\\end{equation}\n", - "\n", - "and\n", - "\n", - 
"\\begin{equation}\n", - "\\begin{aligned}\n", - "\\operatorname{Pr}(V=1 \\mid Z=0, X=x)&=\\pi_a(x)\\\\\n", - "\\operatorname{Pr}(V=1 \\mid Z=1, X=x)&=\\pi_a(x)+\\pi_c(x)\n", - "\\end{aligned}\n", - "\\end{equation}\n", - "\n", - "The exclusion restriction would dictate that $\\gamma_s^{01}(x) = \\gamma_s^{00}(x)$ and $\\gamma_s^{11}(x) = \\gamma_s^{10}(x)$ for all $s$. This has two implications. One, $\\gamma_n^{01}(x) = \\gamma_n^{00}(x)$ and $\\gamma_a^{10}(x) = \\gamma_a^{11}(x)$,and because the left-hand terms are identified, this permits $\\gamma_c^{11}(x)$ and $\\gamma_c^{00}(x)$ to be solved for by substitution. Two, with these two quantities solved for, we also have the two other quantities (the different settings of $z$), since $\\gamma_c^{11}(x) = \\gamma_c^{10}(x)$ and $\\gamma_c^{00}(x) = \\gamma_c^{01}(x)$. Consequently, both of our estimands from above can be estimated:\n", - "\n", - "$$\\gamma_c^{11}(x) - \\gamma_c^{01}(x)$$\n", - "and \n", - "\n", - "$$\\gamma_c^{10}(x) - \\gamma_c^{00}(x)$$\n", - "because they are both (supposing the exclusion restriction holds) the same as\n", - "\n", - "$$\\gamma_c^{11}(x) - \\gamma_c^{00}(x).$$\n", - "If the exclusion restriction does *not* hold, then the three above treatment effects are all (potentially) distinct and not much can be said about the former two. The latter one, the $ITT_c$, however, can be partially identified, by recognizing that the first two equations (in our four equation system) provide non-trivial bounds based on the fact that while $\\gamma_c^{11}(x)$ and $\\gamma_c^{00}(x)$ are no longer identified, as probabilities both must lie between 0 and 1. 
Thus, \n", - "\n", - "\\begin{equation}\n", - "\\begin{aligned}\n", - "\t\\max\\left(\n", - "\t\t0, \\frac{\\pi_c(x)+\\pi_n(x)}{\\pi_c(x)}p_{1\\mid 00}(x) - \\frac{\\pi_n(x)}{\\pi_c(x)}\n", - "\t\\right)\n", - "&\\leq\\gamma^{00}_c(x)\\leq\n", - "\t\\min\\left(\n", - "\t\t1, \\frac{\\pi_c(x)+\\pi_n(x)}{\\pi_c(x)}p_{1\\mid 00}(x)\n", - "\t\\right)\\\\\\\\\n", - "%\n", - "\\max\\left(\n", - " 0, \\frac{\\pi_a(x)+\\pi_c(x)}{\\pi_c(x)}p_{1\\mid 11}(x) - \\frac{\\pi_a(x)}{\\pi_c(x)}\n", - "\\right)\n", - "&\\leq\\gamma^{11}_c(x)\\leq\n", - "\\min\\left(\n", - " 1, \\frac{\\pi_a(x)+\\pi_c(x)}{\\pi_c(x)}p_{1\\mid 11}(x)\n", - "\\right)\n", - "\\end{aligned}\n", - "\\end{equation}\n", - "\n", - "The point of all this is that the data (plus a no-defiers assumption) lets us estimate all the necessary inputs to these upper and lower bounds on $\\gamma^{11}_c(x)$ and $\\gamma^{00}_c(x)$ which in turn define our estimand. What remains is to estimate those inputs, as functions of $x$, and to do so while enforcing the monotonicty restriction $$\\operatorname{Pr}(V=1 \\mid Z=0, X=x)=\\pi_a(x) \\leq \n", - "\\operatorname{Pr}(V=1 \\mid Z=1, X=x)=\\pi_a(x)+\\pi_c(x).$$\n", - "\n", - "We can do all of this with calls to stochtree from R (or Python). But first, let's generate some test data. 
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Simulate the data" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Start with some initial setup / housekeeping" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Size of the training sample\n", - "n = 20000\n", - "\n", - "# To set the seed for reproducibility/illustration purposes, replace \"None\" with a positive integer\n", - "random_seed = None\n", - "if random_seed is not None:\n", - " rng = np.random.default_rng(random_seed)\n", - "else:\n", - " rng = np.random.default_rng()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First, we generate the instrument exogenously" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "z = rng.binomial(n=1, p=0.5, size=n)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we generate the covariate. (For this example, let's think of it as patient age, although we are generating it from a uniform distribution between 0 and 3, so you have to imagine that it has been pre-standardized to this scale. It keeps the DGPs cleaner for illustration purposes.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "p_X = 1\n", - "X = rng.uniform(low=0., high=3., size=(n,p_X))\n", - "x = X[:,0] # for ease of reference later" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we generate the principal strata $S$ based on the observed value of $X$. We generate it according to a logistic regression with two coefficients per strata, an intercept and a slope. Here, these coefficients are set so that the probability of being a never taker decreases with age." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "alpha_a = 0\n", - "beta_a = 1\n", - "\n", - "alpha_n = 1\n", - "beta_n = -1\n", - "\n", - "alpha_c = 1\n", - "beta_c = 1\n", - "\n", - "# Define function (a logistic model) to generate Pr(S = s | X = x)\n", - "def pi_s(xval, alpha_a, beta_a, alpha_n, beta_n, alpha_c, beta_c):\n", - " w_a = np.exp(alpha_a + beta_a*xval)\n", - " w_n = np.exp(alpha_n + beta_n*xval)\n", - " w_c = np.exp(alpha_c + beta_c*xval)\n", - " w = np.column_stack((w_a, w_n, w_c))\n", - " w_rowsum = np.sum(w, axis=1, keepdims=True)\n", - " return np.divide(w, w_rowsum)\n", - " \n", - "# Sample principal strata based on observed probabilities\n", - "strata_probs = pi_s(X[:,0], alpha_a, beta_a, alpha_n, beta_n, alpha_c, beta_c)\n", - "s = np.empty_like(X[:,0], dtype=str)\n", - "for i in range(s.size):\n", - " s[i] = rng.choice(a=['a','n','c'], size=1, p=strata_probs[i,:])[0]\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we generate the treatment variable, here denoted $V$ (for \"vaccine\"), as a *deterministic* function of $S$ and $Z$; this is what gives the principal strata their meaning." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "v = 1*(s=='a') + 0*(s=='n') + z*(s==\"c\") + (1-z)*(s == \"d\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, the outcome structural model is specified, based on which the outcome is sampled. By varying this function in particular ways, we can alter the identification conditions." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def gamfun(xval, vval, zval, sval):\n", - " \"\"\"\n", - " If this function depends on zval, then exclusion restriction is violated.\n", - " If this function does not depend on sval, then IV analysis wasn't necessary.\n", - " If this function does not depend on x, then there are no HTEs.\n", - " \"\"\"\n", - " baseline = norm.cdf(2 - 1*xval - 2.5*((xval-1.5)**2) - 0.5*zval + 1*(sval==\"n\") - 1*(sval==\"a\"))\n", - " return baseline - 0.5*vval*baseline\n", - "\n", - "y = rng.binomial(n=1, p=gamfun(X[:,0],v,z,s), size=n)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Lastly, we perform some organization for our supervised learning algorithms later on." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Concatenate X, v and z for our supervised learning algorithms\n", - "Xall = np.concatenate((X, np.column_stack((v,z))), axis=1)\n", - "\n", - "# Update the size of \"X\" to be the size of Xall\n", - "p_X = p_X + 2\n", - "\n", - "# For the monotone probit model it is necessary to sort the observations so that the Z=1 cases are all together\n", - "# at the start of the outcome vector. \n", - "sort_index = np.argsort(z)[::-1]\n", - "X = X[sort_index,:]\n", - "Xall = Xall[sort_index,:]\n", - "z = z[sort_index]\n", - "v = v[sort_index]\n", - "s = s[sort_index]\n", - "y = y[sort_index]\n", - "x = x[sort_index]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now let's see if we can recover these functions from the observed data." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Fit the outcome model\n", - "\n", - "We have to fit three models here, the treatment models: $\\operatorname{Pr}(V = 1 | Z = 1, X=x)$ and $\\operatorname{Pr}(V = 1 | Z = 0,X = x)$, subject to the monotonicity constraint $\\operatorname{Pr}(V = 1 | Z = 1, X=x) \\geq \\operatorname{Pr}(V = 1 | Z = 0,X = x)$, and an outcome model $\\operatorname{Pr}(Y = 1 | Z = 1, V = 1, X = x)$. All of this will be done with stochtree. \n", - "\n", - "The outcome model is fit with a single (S-learner) BART model. This part of the model could be fit as a T-Learner or as a BCF model. Here we us an S-Learner for simplicity. Both models are probit models, and use the well-known Albert and Chib (1993) data augmentation Gibbs sampler. This section covers the more straightforward outcome model. The next section describes how the monotonicity constraint is handled with a data augmentation Gibbs sampler. \n", - "\n", - "These models could (and probably should) be wrapped as functions. Here they are implemented as scripts, with the full loops shown. The output -- at the end of the loops -- are stochtree forest objects from which we can extract posterior samples and generate predictions. In particular, the $ITT_c$ will be constructed using posterior counterfactual predictions derived from these forest objects. \n", - "\n", - "We begin by setting a bunch of hyperparameters and instantiating the forest objects to be operated upon in the main sampling loop. We also initialize the latent variables." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Fit the BART model for Pr(Y = 1 | Z = 1, V = 1, X = x)\n", - "\n", - "# Set number of iterations\n", - "num_warmstart = 10\n", - "num_mcmc = 1000\n", - "num_samples = num_warmstart + num_mcmc\n", - "\n", - "# Set a bunch of hyperparameters. 
These are ballpark default values.\n", - "alpha = 0.95\n", - "beta = 2\n", - "min_samples_leaf = 1\n", - "max_depth = 20\n", - "num_trees = 50\n", - "cutpoint_grid_size = 100\n", - "global_variance_init = 1.\n", - "tau_init = 0.5\n", - "leaf_prior_scale = np.array([[tau_init]])\n", - "leaf_regression = False\n", - "feature_types = np.append(np.repeat(0, p_X - 2), [1,1]).astype(int)\n", - "var_weights = np.repeat(1.0/p_X, p_X)\n", - "outcome_model_type = 0\n", - "\n", - "# C++ dataset\n", - "forest_dataset = Dataset()\n", - "forest_dataset.add_covariates(Xall)\n", - "\n", - "# Random number generator (std::mt19937)\n", - "if random_seed is not None:\n", - " cpp_rng = RNG(random_seed)\n", - "else:\n", - " cpp_rng = RNG()\n", - "\n", - "# Sampling data structures\n", - "forest_model_config = ForestModelConfig(\n", - " feature_types = feature_types, \n", - " num_trees = num_trees, \n", - " num_features = p_X, \n", - " num_observations = n, \n", - " variable_weights = var_weights, \n", - " leaf_dimension = 1, \n", - " alpha = alpha, \n", - " beta = beta, \n", - " min_samples_leaf = min_samples_leaf, \n", - " max_depth = max_depth, \n", - " leaf_model_type = outcome_model_type, \n", - " leaf_model_scale = leaf_prior_scale, \n", - " cutpoint_grid_size = cutpoint_grid_size\n", - ")\n", - "global_model_config = GlobalModelConfig(global_error_variance=1.0)\n", - "forest_sampler = ForestSampler(\n", - " forest_dataset, global_model_config, forest_model_config\n", - ")\n", - "\n", - "# Container of forest samples\n", - "forest_samples = ForestContainer(num_trees, 1, True, False)\n", - "\n", - "# \"Active\" forest state\n", - "active_forest = Forest(num_trees, 1, True, False)\n", - "\n", - "# Initialize the latent outcome zed\n", - "n1 = np.sum(y)\n", - "zed = 0.25*(2.0*y - 1.0)\n", - "\n", - "# C++ outcome variable\n", - "outcome = Residual(zed)\n", - "\n", - "# Initialize the active forest and subtract each root tree's predictions from outcome\n", - "forest_init_val = 
np.array([0.0])\n", - "forest_sampler.prepare_for_sampler(\n", - " forest_dataset,\n", - " outcome,\n", - " active_forest,\n", - " outcome_model_type,\n", - " forest_init_val,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we enter the main loop, which involves only two steps: sample the forest, given the latent utilities, then sample the latent utilities given the estimated conditional means defined by the forest and its parameters. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "gfr_flag = True\n", - "for i in range(num_samples):\n", - " # The first num_warmstart iterations use the grow-from-root algorithm of He and Hahn\n", - " if i >= num_warmstart:\n", - " gfr_flag = False\n", - " \n", - " # Sample forest\n", - " forest_sampler.sample_one_iteration(\n", - " forest_samples, active_forest, forest_dataset, outcome, cpp_rng, \n", - " global_model_config, forest_model_config, keep_forest=True, gfr = gfr_flag\n", - " )\n", - "\n", - " # Get the current means\n", - " eta = np.squeeze(forest_samples.predict_raw_single_forest(forest_dataset, i))\n", - "\n", - " # Sample latent normals, truncated according to the observed outcome y\n", - " mu0 = eta[y == 0]\n", - " mu1 = eta[y == 1]\n", - " u0 = rng.uniform(\n", - " low=0.0,\n", - " high=norm.cdf(0 - mu0),\n", - " size=n-n1,\n", - " )\n", - " u1 = rng.uniform(\n", - " low=norm.cdf(0 - mu1),\n", - " high=1.0,\n", - " size=n1,\n", - " )\n", - " zed[y == 0] = mu0 + norm.ppf(u0)\n", - " zed[y == 1] = mu1 + norm.ppf(u1)\n", - "\n", - " # Update outcome\n", - " new_outcome = np.squeeze(zed) - eta\n", - " outcome.update_data(new_outcome)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Fit the monotone probit model(s)\n", - "\n", - "The monotonicty constraint relies on a data augmentation as described in Papakostas et al (2023). 
The implementation of this sampler is inherently cumbersome, as one of the \"data\" vectors is constructed from some observed data and some latent data and there are two forest objects, one of which applies to all of the observations and one of which applies to only those observations with $Z = 0$. We go into more details about this sampler in a dedicated vignette. Here we include the code, but without producing the equations derived in Papakostas (2023). What is most important is simply that\n", - "\n", - "\\begin{equation}\n", - "\\begin{aligned}\n", - "\\operatorname{Pr}(V=1 \\mid Z=0, X=x)&=\\pi_a(x) = \\Phi_f(x)\\Phi_h(x),\\\\\n", - "\\operatorname{Pr}(V=1 \\mid Z=1, X=x)&=\\pi_a(x)+\\pi_c(x) = \\Phi_f(x),\n", - "\\end{aligned}\n", - "\\end{equation}\n", - "where $\\Phi_{\\mu}(x)$ denotes the normal cumulative distribution function with mean $\\mu(x)$ and variance 1. \n", - "\n", - "We first create a secondary data matrix for the $Z=0$ group only. We also set all of the hyperparameters and initialize the latent variables." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Fit the monotone probit model to the treatment such that Pr(V = 1 | Z = 1, X=x) >= Pr(V = 1 | Z = 0,X = x) \n", - "X_h = X[z==0,:]\n", - "n0 = np.sum(z==0)\n", - "n1 = np.sum(z==1)\n", - "\n", - "num_trees_f = 50\n", - "num_trees_h = 20\n", - "feature_types = np.repeat(0, p_X-2).astype(int)\n", - "var_weights = np.repeat(1.0/(p_X - 2.0), p_X - 2)\n", - "cutpoint_grid_size = 100\n", - "global_variance_init = 1.\n", - "tau_init_f = 1/num_trees_f\n", - "tau_init_h = 1/num_trees_h\n", - "leaf_prior_scale_f = np.array([[tau_init_f]])\n", - "leaf_prior_scale_h = np.array([[tau_init_h]])\n", - "leaf_regression = False # fit a constant leaf mean BART model\n", - "\n", - "# Instantiate the C++ dataset objects\n", - "forest_dataset_f = Dataset()\n", - "forest_dataset_f.add_covariates(X)\n", - "forest_dataset_h = Dataset()\n", - "forest_dataset_h.add_covariates(X_h)\n", - "\n", - "# Tell it we're fitting a normal BART model\n", - "outcome_model_type = 0\n", - "\n", - "# Set up model configuration objects\n", - "forest_model_config_f = ForestModelConfig(\n", - " feature_types = feature_types, \n", - " num_trees = num_trees_f, \n", - " num_features = X.shape[1], \n", - " num_observations = n, \n", - " variable_weights = var_weights, \n", - " leaf_dimension = 1, \n", - " alpha = alpha, \n", - " beta = beta, \n", - " min_samples_leaf = min_samples_leaf, \n", - " max_depth = max_depth, \n", - " leaf_model_type = outcome_model_type, \n", - " leaf_model_scale = leaf_prior_scale_f, \n", - " cutpoint_grid_size = cutpoint_grid_size\n", - ")\n", - "forest_model_config_h = ForestModelConfig(\n", - " feature_types = feature_types, \n", - " num_trees = num_trees_h, \n", - " num_features = X_h.shape[1], \n", - " num_observations = n0, \n", - " variable_weights = var_weights, \n", - " leaf_dimension = 1, \n", - " alpha = alpha, \n", - " beta = beta, \n", - " min_samples_leaf = 
min_samples_leaf, \n", - " max_depth = max_depth, \n", - " leaf_model_type = outcome_model_type, \n", - " leaf_model_scale = leaf_prior_scale_h, \n", - " cutpoint_grid_size = cutpoint_grid_size\n", - ")\n", - "global_model_config = GlobalModelConfig(global_error_variance=global_variance_init)\n", - "\n", - "# Instantiate the sampling data structures\n", - "forest_sampler_f = ForestSampler(\n", - " forest_dataset_f, global_model_config, forest_model_config_f\n", - ")\n", - "forest_sampler_h = ForestSampler(\n", - " forest_dataset_h, global_model_config, forest_model_config_h\n", - ")\n", - "\n", - "# Instantiate containers of forest samples\n", - "forest_samples_f = ForestContainer(num_trees_f, 1, True, False)\n", - "forest_samples_h = ForestContainer(num_trees_h, 1, True, False)\n", - "\n", - "# Instantiate \"active\" forests\n", - "active_forest_f = Forest(num_trees_f, 1, True, False)\n", - "active_forest_h = Forest(num_trees_h, 1, True, False)\n", - "\n", - "# Set algorithm specifications \n", - "# these are set in the earlier script for the outcome model; number of draws needs to be commensurable \n", - "\n", - "# num_warmstart = 40\n", - "# num_mcmc = 5000\n", - "# num_samples = num_warmstart + num_mcmc\n", - "\n", - "# Initialize the Markov chain\n", - "\n", - "# Initialize (R0, R1), the latent binary variables that enforce the monotonicty \n", - "v1 = v[z==1]\n", - "v0 = v[z==0]\n", - "\n", - "R1 = np.empty(n0, dtype=float)\n", - "R0 = np.empty(n0, dtype=float)\n", - "\n", - "R1[v0==1] = 1\n", - "R0[v0==1] = 1\n", - "\n", - "nv0 = np.sum(v0==0)\n", - "R1[v0 == 0] = 0\n", - "R0[v0 == 0] = rng.choice([0,1], size = nv0)\n", - "\n", - "# The first n1 observations of vaug are actually observed\n", - "# The next n0 of them are the latent variable R1\n", - "vaug = np.append(v1, R1)\n", - "\n", - "# Initialize the Albert and Chib latent Gaussian variables\n", - "z_f = (2.0*vaug - 1.0)\n", - "z_h = (2.0*R0 - 1.0)\n", - "z_f = z_f/np.std(z_f)\n", - "z_h = 
z_h/np.std(z_h)\n", - "\n", - "# Pass these variables to the BART models as outcome variables\n", - "outcome_f = Residual(z_f)\n", - "outcome_h = Residual(z_h)\n", - "\n", - "# Initialize active forests to constant (0) predictions\n", - "forest_init_val_f = np.array([0.0])\n", - "forest_sampler_f.prepare_for_sampler(\n", - " forest_dataset_f,\n", - " outcome_f,\n", - " active_forest_f,\n", - " outcome_model_type,\n", - " forest_init_val_f,\n", - ")\n", - "forest_init_val_h = np.array([0.0])\n", - "forest_sampler_h.prepare_for_sampler(\n", - " forest_dataset_h,\n", - " outcome_h,\n", - " active_forest_h,\n", - " outcome_model_type,\n", - " forest_init_val_h,\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we run the main sampling loop, which consists of three key steps: sample the BART forests, given the latent probit utilities, sampling the latent binary outcome pairs (this is the step that is necessary for enforcing monotonicity), given the forest predictions and the latent utilities, and finally sample the latent utilities." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# PART IV: run the Markov chain \n", - "\n", - "# Initialize the Markov chain with num_warmstart grow-from-root iterations\n", - "gfr_flag = True\n", - "for i in range(num_samples):\n", - " # Switch over to random walk Metropolis-Hastings tree updates after num_warmstart\n", - " if i >= num_warmstart:\n", - " gfr_flag = False\n", - " \n", - " # Step 1: Sample the BART forests\n", - "\n", - " # Sample forest for the function f based on (y_f, R1)\n", - " forest_sampler_f.sample_one_iteration(\n", - " forest_samples_f, active_forest_f, forest_dataset_f, outcome_f, cpp_rng, \n", - " global_model_config, forest_model_config_f, keep_forest=True, gfr = gfr_flag\n", - " )\n", - "\n", - " # Sample forest for the function h based on outcome R0\n", - " forest_sampler_h.sample_one_iteration(\n", - " forest_samples_h, active_forest_h, forest_dataset_h, outcome_h, cpp_rng, \n", - " global_model_config, forest_model_config_h, keep_forest=True, gfr = gfr_flag\n", - " )\n", - "\n", - " # Get the current means\n", - " eta_f = np.squeeze(forest_samples_f.predict_raw_single_forest(forest_dataset_f, i))\n", - " eta_h = np.squeeze(forest_samples_h.predict_raw_single_forest(forest_dataset_h, i))\n", - "\n", - " # Step 2: sample the latent binary pair (R0, R1) given eta_h, eta_f, and y_g\n", - "\n", - " # Three cases: (0,0), (0,1), (1,0)\n", - " w1 = (1 - norm.cdf(eta_h[v0==0]))*(1 - norm.cdf(eta_f[n1 + np.where(v0==0)]))\n", - " w2 = (1 - norm.cdf(eta_h[v0==0]))*norm.cdf(eta_f[n1 + np.where(v0==0)])\n", - " w3 = norm.cdf(eta_h[v0==0])*(1 - norm.cdf(eta_f[n1 + np.where(v0==0)]))\n", - "\n", - " s = w1 + w2 + w3\n", - " w1 = w1/s\n", - " w2 = w2/s\n", - " w3 = w3/s\n", - "\n", - " u = rng.uniform(low=0,high=1,size=np.sum(v0==0))\n", - " temp = 1*(np.squeeze(u < w1)) + 2*(np.squeeze((u > w1) & (u < (w1 + w2)))) + 3*(np.squeeze(u > (w1 + w2)))\n", - "\n", - " R1[v0==0] = 
1*(temp==2)\n", - " R0[v0==0] = 1*(temp==3)\n", - "\n", - " # Redefine y with the updated R1 component\n", - " vaug = np.append(v1, R1)\n", - "\n", - " # Step 3: sample the latent normals, given (R0, R1) and y_f\n", - "\n", - " # First z0\n", - " mu1 = eta_h[R0==1]\n", - " U1 = rng.uniform(\n", - " low=norm.cdf(0 - mu1), \n", - " high=1,\n", - " size=np.sum(R0).astype(int)\n", - " )\n", - " z_h[R0==1] = mu1 + norm.ppf(U1)\n", - "\n", - " mu0 = eta_h[R0==0]\n", - " U0 = rng.uniform(\n", - " low=0, \n", - " high=norm.cdf(0 - mu0),\n", - " size=(n0 - np.sum(R0)).astype(int)\n", - " )\n", - " z_h[R0==0] = mu0 + norm.ppf(U0)\n", - "\n", - " # Then z1\n", - " mu1 = eta_f[vaug==1]\n", - " U1 = rng.uniform(\n", - " low=norm.cdf(0 - mu1), \n", - " high=1,\n", - " size=np.sum(vaug).astype(int)\n", - " )\n", - " z_f[vaug==1] = mu1 + norm.ppf(U1)\n", - "\n", - " mu0 = eta_f[vaug==0]\n", - " U0 = rng.uniform(\n", - " low=0, \n", - " high=norm.cdf(0 - mu0),\n", - " size=(n - np.sum(vaug)).astype(int)\n", - " )\n", - " z_f[vaug==0] = mu0 + norm.ppf(U0)\n", - "\n", - " # Propagate the updated outcomes through the BART models\n", - " new_outcome_h = np.squeeze(z_h) - eta_h\n", - " outcome_h.update_data(new_outcome_h)\n", - "\n", - " new_outcome_f = np.squeeze(z_f) - eta_f\n", - " outcome_f.update_data(new_outcome_f)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Extracting the estimates and plotting the results.\n", - "\n", - "Now for the most interesting part, which is taking the stochtree BART model fits and producing the causal estimates of interest. \n", - "\n", - "First we set up our grid for plotting the functions in $X$. This is possible in this example because the moderator, age, is one dimensional; in may applied problems this will not be the case and visualization will be substantially trickier. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Extract the credible intervals for the conditional treatment effects as a function of x.\n", - "# We use a grid of values for plotting, with grid points that are typically fewer than the number of observations.\n", - "\n", - "ngrid = 200\n", - "xgrid = np.linspace(start=0.1, stop=2.9, num=ngrid)\n", - "X_11 = np.column_stack((xgrid, np.ones(ngrid), np.ones(ngrid)))\n", - "X_00 = np.column_stack((xgrid, np.zeros(ngrid), np.zeros(ngrid)))\n", - "X_01 = np.column_stack((xgrid, np.zeros(ngrid), np.ones(ngrid)))\n", - "X_10 = np.column_stack((xgrid, np.ones(ngrid), np.zeros(ngrid)))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we compute the truth function evaluations on this plotting grid, using the functions defined above when we generated our data." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Compute the true conditional outcome probabilities for plotting\n", - "pi_strat = pi_s(xgrid, alpha_a, beta_a, alpha_n, beta_n, alpha_c, beta_c)\n", - "w_a = pi_strat[:,0]\n", - "w_n = pi_strat[:,1]\n", - "w_c = pi_strat[:,2]\n", - "\n", - "w = (w_c/(w_a + w_c))\n", - "p11_true = w*gamfun(xgrid,1,1,\"c\") + (1-w)*gamfun(xgrid,1,1,\"a\")\n", - "\n", - "w = (w_c/(w_n + w_c))\n", - "p00_true = w*gamfun(xgrid,0,0,\"c\") + (1-w)*gamfun(xgrid,0,0,\"n\")\n", - "\n", - "# Compute the true ITT_c for plotting and comparison\n", - "itt_c_true = gamfun(xgrid,1,1,\"c\") - gamfun(xgrid,0,0,\"c\")\n", - "\n", - "# Compute the true LATE for plotting and comparison\n", - "LATE_true0 = gamfun(xgrid,1,0,\"c\") - gamfun(xgrid,0,0,\"c\")\n", - "LATE_true1 = gamfun(xgrid,1,1,\"c\") - gamfun(xgrid,0,1,\"c\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next we populate the data structures for stochtree to operate on, call the predict functions to 
extract the predictions, convert them to probability scale using the `scipy.stats.norm.cdf` method." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Datasets for counterfactual predictions\n", - "forest_dataset_grid = Dataset()\n", - "forest_dataset_grid.add_covariates(np.expand_dims(xgrid, 1))\n", - "forest_dataset_11 = Dataset()\n", - "forest_dataset_11.add_covariates(X_11)\n", - "forest_dataset_00 = Dataset()\n", - "forest_dataset_00.add_covariates(X_00)\n", - "forest_dataset_10 = Dataset()\n", - "forest_dataset_10.add_covariates(X_10)\n", - "forest_dataset_01 = Dataset()\n", - "forest_dataset_01.add_covariates(X_01)\n", - "\n", - "# Forest predictions\n", - "preds_00 = forest_samples.predict(forest_dataset_00)\n", - "preds_11 = forest_samples.predict(forest_dataset_11)\n", - "preds_01 = forest_samples.predict(forest_dataset_01)\n", - "preds_10 = forest_samples.predict(forest_dataset_10)\n", - "\n", - "# Probability transformations\n", - "phat_00 = norm.cdf(preds_00)\n", - "phat_11 = norm.cdf(preds_11)\n", - "phat_01 = norm.cdf(preds_01)\n", - "phat_10 = norm.cdf(preds_10)\n", - "\n", - "preds_ac = forest_samples_f.predict(forest_dataset_grid)\n", - "phat_ac = norm.cdf(preds_ac)\n", - "\n", - "preds_adj = forest_samples_h.predict(forest_dataset_grid)\n", - "phat_a = norm.cdf(preds_ac) * norm.cdf(preds_adj)\n", - "phat_c = phat_ac - phat_a\n", - "phat_n = 1 - phat_ac" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we may plot posterior means of various quantities (as a function of $X$) to visualize how well the models are fitting." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, (ax1, ax2) = plt.subplots(1, 2)\n", - "ax1.scatter(p11_true, np.mean(phat_11, axis=1), color=\"black\")\n", - "ax1.axline((0, 0), slope=1, color=\"red\", linestyle=(0, (3, 3)))\n", - "ax2.scatter(p00_true, np.mean(phat_00, axis=1), color=\"black\")\n", - "ax2.axline((0, 0), slope=1, color=\"red\", linestyle=(0, (3, 3)))\n", - "plt.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex=\"none\", sharey=\"none\")\n", - "ax1.scatter(np.mean(phat_ac, axis=1), w_c + w_a, color=\"black\")\n", - "ax1.axline((0, 0), slope=1, color=\"red\", linestyle=(0, (3, 3)))\n", - "ax1.set_xlim(0.5,1.1)\n", - "ax1.set_ylim(0.5,1.1)\n", - "ax2.scatter(np.mean(phat_a, axis=1), w_a, color=\"black\")\n", - "ax2.axline((0, 0), slope=1, color=\"red\", linestyle=(0, (3, 3)))\n", - "ax2.set_xlim(0.1,0.4)\n", - "ax2.set_ylim(0.1,0.3)\n", - "ax3.scatter(np.mean(phat_c, axis=1), w_c, color=\"black\")\n", - "ax3.axline((0, 0), slope=1, color=\"red\", linestyle=(0, (3, 3)))\n", - "ax3.set_xlim(0.4,0.9)\n", - "ax3.set_ylim(0.4,0.8)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "These plots are not as pretty as we might hope, but mostly this is a function of how difficult it is to learn conditional probabilities from binary outcomes. That we capture the trend broadly turns out to be adequate for estimating treatment effects. Fit does improve with simpler DGPs and larger training sets, as can be confirmed by experimentation with this script. \n", - "\n", - "Lastly, we can construct the estimate of the $ITT_c$ and compare it to the true value as well as the $Z=0$ and $Z=1$ complier average treatment effects (also called \"local average treatment effects\" or LATE). 
The key step in this process is to center our posterior on the identified interval (at each iteration of the sampler) at the value implied by a valid exclusion restriction. For some draws this will not be possible, as that value will be outside the identification region." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Generate draws from the posterior of the treatment effect\n", - "# centered at the point-identified value under the exclusion restriction\n", - "itt_c = np.empty((ngrid, phat_c.shape[1]))\n", - "late = np.empty((ngrid, phat_c.shape[1]))\n", - "ss = 6\n", - "for j in range(phat_c.shape[1]):\n", - " # Value of gamma11 implied by an exclusion restriction\n", - " gamest11 = ((phat_a[:,j] + phat_c[:,j])/phat_c[:,j])*phat_11[:,j] - phat_10[:,j]*phat_a[:,j]/phat_c[:,j]\n", - "\n", - " # Identified region for gamma11\n", - " lower11 = np.maximum(0., ((phat_a[:,j] + phat_c[:,j])/phat_c[:,j])*phat_11[:,j] - phat_a[:,j]/phat_c[:,j])\n", - " upper11 = np.minimum(1., ((phat_a[:,j] + phat_c[:,j])/phat_c[:,j])*phat_11[:,j])\n", - "\n", - " # Center a beta distribution at gamma11, but restricted to (lower11, upper11)\n", - " # do this by shifting and scaling the mean, drawing from a beta on (0,1), then shifting and scaling to the \n", - " # correct restricted interval\n", - " m11 = (gamest11 - lower11)/(upper11 - lower11)\n", - "\n", - " # Parameters of the beta\n", - " a1 = ss*m11\n", - " b1 = ss*(1-m11)\n", - "\n", - " # When the corresponding mean is out-of-range, sample from a beta with mass piled near the violated boundary\n", - " a1[m11<0] = 1\n", - " b1[m11<0] = 5\n", - " \n", - " a1[m11>1] = 5\n", - " b1[m11>1] = 1\n", - "\n", - " # Value of gamma00 implied by an exclusion restriction\n", - " gamest00 = ((phat_n[:,j] + phat_c[:,j])/phat_c[:,j])*phat_00[:,j] - phat_01[:,j]*phat_n[:,j]/phat_c[:,j]\n", - "\n", - " # Identified region for gamma00\n", - " lower00 = np.maximum(0., ((phat_n[:,j] + 
phat_c[:,j])/phat_c[:,j])*phat_00[:,j] - phat_n[:,j]/phat_c[:,j])\n", - " upper00 = np.minimum(1., ((phat_n[:,j] + phat_c[:,j])/phat_c[:,j])*phat_00[:,j])\n", - "\n", - " # Center a beta distribution at gamma00, but restricted to (lower00, upper00)\n", - " # do this by shifting and scaling the mean, drawing from a beta on (0,1), then shifting and scaling to the \n", - " # correct restricted interval\n", - " m00 = (gamest00 - lower00)/(upper00 - lower00)\n", - "\n", - " a0 = ss*m00\n", - " b0 = ss*(1-m00)\n", - " a0[m00<0] = 1\n", - " b0[m00<0] = 5 \n", - " a0[m00>1] = 5\n", - " b0[m00>1] = 1\n", - "\n", - " # ITT and LATE \n", - " itt_c[:,j] = lower11 + (upper11 - lower11)*rng.beta(a=a1,b=b1,size=ngrid) - (lower00 + (upper00 - lower00)*rng.beta(a=a0,b=b0,size=ngrid))\n", - " late[:,j] = gamest11 - gamest00\n", - "\n", - "upperq = np.quantile(itt_c, q=0.975, axis=1)\n", - "lowerq = np.quantile(itt_c, q=0.025, axis=1)\n", - "upperq_er = np.quantile(late, q=0.975, axis=1)\n", - "lowerq_er = np.quantile(late, q=0.025, axis=1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And now we can plot all of this, shading posterior quantiles with [pyplot's `fill` function](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.fill.html)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "plt.plot(xgrid, itt_c_true, color = \"black\")\n", - "plt.ylim(-0.75, 0.05)\n", - "plt.fill(np.append(xgrid, xgrid[::-1]), np.append(lowerq, upperq[::-1]), color = (0.5,0.5,0,0.25))\n", - "plt.fill(np.append(xgrid, xgrid[::-1]), np.append(lowerq_er, upperq_er[::-1]), color = (0,0,0.5,0.25))\n", - "\n", - "itt_c_est = np.mean(itt_c, axis=1)\n", - "late_est = np.mean(late, axis=1)\n", - "\n", - "plt.plot(xgrid, late_est, color = \"darkgrey\")\n", - "plt.plot(xgrid, itt_c_est, color = \"gold\")\n", - "plt.plot(xgrid, LATE_true0, color = \"black\", linestyle = (0, (2, 2)))\n", - "plt.plot(xgrid, LATE_true1, color = \"black\", linestyle = (0, (4, 4)))\n", - "plt.plot(xgrid, itt_c_true, color = \"black\")\n", - "\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With a valid exclusion restriction the three black curves would all be the same. With no exclusion restriction, as we have here, the direct effect of $Z$ on $Y$ (the vaccine reminder on flu status) makes it so these three treatment effects are different. Specifically, the $ITT_c$ compares getting the vaccine *and* getting the reminder to not getting the vaccine *and* not getting the reminder. When both things have risk reducing impacts, we see a larger risk reduction over all values of $X$. Meanwhile, the two LATE effects compare the isolated impact of the vaccine among people that got the reminder and those that didn't, respectively. Here, not getting the reminder makes the vaccine more effective because the risk reduction is as a fraction of baseline risk, and the reminder reduces baseline risk in our DGP. \n", - "\n", - "We see also that the posterior mean of the $ITT_c$ estimate (gold) is very similar to the posterior mean under the assumption of an exclusion restriction (gray). 
This is by design...they will only deviate due to Monte Carlo variation or due to the rare situations where the exclusion restriction is incompatible with the identification interval. \n", - "\n", - "By changing the sample size and various aspects of the DGP this script allows us to build some intuition for how aspects of the DGP affect posterior inferences, particularly how violates of assumptions affect accuracy and posterior uncertainty." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# References\n", - "\n", - "Albert, James H, and Siddhartha Chib. 1993. “Bayesian Analysis of Binary and Polychotomous Response Data.” *Journal of the American Statistical Association* 88 (422): 669–79.\n", - "\n", - "Hahn, P Richard, Jared S Murray, and Ioanna Manolopoulou. 2016. “A Bayesian Partial Identification Approach to Inferring the Prevalence of Accounting Misconduct.” *Journal of the American Statistical Association* 111 (513): 14–26.\n", - "\n", - "Hirano, Keisuke, Guido W. Imbens, Donald B. Rubin, and Xiao-Hua Zhou. 2000. “Assessing the Effect of an Influenza Vaccine in an Encouragement Design.” *Biostatistics* 1 (1): 69–88. https://doi.org/10.1093/biostatistics/1.1.69.\n", - "\n", - "Imbens, Guido W., and Donald B. Rubin. 2015. *Causal Inference for Statistics, Social, and Biomedical Sciences: An Introduction*. Cambridge University Press.\n", - "\n", - "McDonald, Clement J, Siu L Hui, and William M Tierney. 1992. “Effects of Computer Reminders for Influenza Vaccination on Morbidity During Influenza Epidemics.” *MD Computing: Computers in Medical Practice* 9 (5): 304–12.\n", - "\n", - "Papakostas, Demetrios, P Richard Hahn, Jared Murray, Frank Zhou, and Joseph Gerakos. 2023. “Do Forecasts of Bankruptcy Cause Bankruptcy? A Machine Learning Sensitivity Analysis.” *The Annals of Applied Statistics* 17 (1): 711–39.\n", - "\n", - "Richardson, Thomas S., Robin J. Evans, and James M. Robins. 2011. 
“Transparent Parametrizations of Models for Potential Outcomes.” In *Bayesian Statistics 9*. Oxford University Press. https://doi.org/10.1093/acprof:oso/9780199694587.003.0019." - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/vignettes/Python/RDD/RDD_DAG.png b/vignettes/Python/RDD/RDD_DAG.png deleted file mode 100644 index a73abc16ecea1c7b31070fb5f0a5648246a23578..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38039 zcmeFZ^;?u(+crEi4md+IgCK~sbb}z>NT-x^gGe{h4I`8?Z54DnsF6vi^y2%;; z!1~`|c)85KXD4cvLwVct37rYZFCoDbUE>m$n0oNLZnq?gsqpTWzm1boY;Dl`QtC;5 z`gJAiR0MA?jOYW7SYWlSep6U&V|x`7CfOc!FqJGd zH%E_VwBC?uXQ}=)vf~K&_RK6tt*m%bte7rYkB_P+YTfXavnUmwel5{H(z<+@B=gX| z{poPo8h6iWk5p*|QRG(7xNmBOyYuXfdWXmImx$<%P3~i^Qs*HvJUs zos&isi;9X$*u}z1P(xbwf0~2eL}+Z>+?)hCI6OT)**$sK9bK$BxC8_QI5@dExVhQD z6>P5F4sNDiY!0rp|J>w%?;~yLYVKm|j(Yk-#`7d^s@c$ zlN?9M|7)aItf8{O8`Fsqpouf@-#2mQVGhZS8@1z%xYe^E?p#d;S0CoBuxX zUoCb2+meru^WQE1_09jgrIxFuiXS>hiv|NRu$ zSrki{<3DpIij`9lTn2%NL*%6;9(h5xUt`bT(rWrc!n_zLO@xDiktaz2RTnV8QI=>T zGLeN+48*CrpkIi_tIex)?h~O%dR^nB;!T`5a~k_(2r-01iBciTs9PLzvT0|hDQ$m# zy|6By%JMu}tohb#wb3A&efoN~#(HYKWh%>i!#nG^j7&=#r9>wV`|oQxv6D%(@~h>4 zKLOvGpmW6ggZ_OPK_moYK>W;$3mFIb&kI8#7K)}mJAz#=F!YHMER5k1;nXj#_XnktuhE$A-&M)pOIZPC8-V@L$0%flSmgx_%>VAHQBx+~~L0B)0)Ln>%$Wy1~j zG5smtmh9i{zx*7+8>?Ucl<=)~IqDUk{Xz1>+awZwts5D3(jER+Jq4*o3d&Zu)zTy4 zfoma~Wh*24>wLuT!A;s1tIQ*G>z}4yc^-WquTC}g|Fxc{^;Wx{uga*+C(mJ_xjwvV zKCH6sB*W|1dTQ_f*N?p?J74Ty6zSw1j|lxF;$QT7^L?)FZ82!nly@eA%nt`XZqn@* zi^4yhql0Lk4`$WHooOc!D$l6_CPYuZmLX=uA4k`F61Wu-0!r#uK8B)8peAw 
zc`CHBF$AxLJlTs`#Nx!sfHzVyudEum_A7DbFoJo$r!!Q$P2VW0JbxPLpg8X@d3>?Y z({FId^S?T;_}1f0@H<1ubC(ET?9xfSV^}rd{FB+`*}jKgNnOkv9bH)>U;(--LI|J4SYyDmczFF(-p{NtYq0wJO7O4UIh zMt(d;{E@hgE6qBN3%o*H@eVrR7@}fA1&(@SP2k-XtbJ^9()*m`OSfgguYF%s zWCCcXK4H1|kLS-q480c8F$sRL%OYSsLhiiQ7jJ--7>HT(`@@Sn7;rhn6gMszZ{O6a z9YP$Dni3&=M3r+XK|8JB}L`>4K8(e z?;=jo)F5IKZnGv`8|py0bJ*BrIOBe1|F)@aXG695$5@PAC=s(r;8&)2R1UsnzE`@y z-%S#j*jaD?1&cRv4`0}0`*xT-LorSEioW;vF>2^EB)!?~2Th?;I*pi%BCHC;dr~16 zV!3Mr{<;S-MRmVaB8a9UAIDci?bpjh=DKf9-9jXz8C3Gse&AA$MhAjbg~;Jn{AX2d z!skQf5i$g^Y0L}|G0h>^w7IrMaaks9B_aai$M}Aw&)}app+FG0L>Zhs2p^^kl~-wx zmkzc!-3{ktLi;h_Lppq=5y!PO$k)TW?*Cd6#LCqi6{SQ(e=lmUUf1;D^rP-_2$?B8 zD)Kp)IOljPfxlZz3WjK3XmmEePfKbV?6ET^Gi)Jko(uLB2A2reY}D@(!Y-?N40v%a zYiAe!KT~arx-IPW4n@!P>P}Fj?FL?=lc~{2l;9YqtITUj*-Ii^;HAc$EGz!r{}~L? zU0%6;FvK&ercb4O&-s@dx4$%P8@MjiEeaAhd0;5d}NW%@pqVM5T$Z%+}oUi-T9$}pX!03%| zN^A8Unq*8woWN+Kfvt=CxQ(xE-Nyt?Yc_o%6a2&d_vJ5vd+|?e9>$WFz_KYtd>+Nz z;b))4syb3a=kyDGS{VEg4_t+KD?GEN+4jkc)Q%cn>&wyi5%}1w3w_csQ*=%^_tU(g zm)GkGhX&FiV}tWm;ZvcqW=*r{$?klUtAR`nISGt_;`l5{Dgw`4;CX}`)dT1g#tN$h2v9JoHR+nObNkASrdjL{*IJR4Memjb&?O#f#VNj-ZRB?-kGb@(qmA~BC31x zStI-FOi%~CFXoc}h5bHwQdE=cH)5h^pS5(NUOBeU60@p`UAYOB;aN$w6igdEQx-m% zF<U zzGah)8EI!MdVjmC@8x?q%-`$Tabd{izWE3m#A`4AllJJUOs{5Q3O+S$0vrwQF1hxL z>p=$-FfynOv=UbC#R}~rYU#}0hvR;>AV=34kEwwGUQiun))PtinKxxq5nl*I>*`d_ zc@SQmKArpY(d=s~jnOdR1>K5_>9mbsABy?w=!@V~eLs+7>}3Y43|(~kMJlg@6<6MW zlX=_dcX1ND743KX{*$C~_mFJEKfkY7h*`(^dIY3{1=^yZrnY>Jiw0u`X@ysXFU5ZA zl0R?~S4!u;SJzE97t3{HX(|fMFvqxj()s!MWh|D_H#+g43Hi|M>!)^x1rnhO;yi#4 z`=W|pw7b+{jB()6#Cssc$wezMwmp^`(uOZVR8!GLj|k<|Ghg_ofH8QBMZ4InP&K>a zByrw*HHzWU{urbG3bwIYHp{|n+p~t9)F{$JTh*)cV@=q4cNjL4T49?MZmF2^BBnd$ zTM$&@$e~J)y^cTYV9=YlULFjZ9dCa2u%z%_i(g|-HtE-0!3?&{7WEr=BIWMtiAr?N zOe-xd-Hzhd$Z6lM9`v<~j6YTNKS1w)rTnAY8NrR+U;BFirdcn6c)}=v1!l+PA!JTA zjgu>cgzRD3!-9T31WwvB{kkUw)noU=H*-c`fBpFKi+ggalx_lb&{`E|gNAvOuqpjr zzNcw+IEs@})lwI2X{+zciu8@X>oX~>Uf5N(P3`cGe$4Vc={-o%t`B?&Q8#8 zea_~Jap;^I>P_J7SHxu*xx;M{>GkL8^ts{cuLq&{mtD27{cPu({CD^r9{ozI#=?e+ 
zi(n8ku82iBAO8IF`=?Jz$zsRv^Q~m@CXgj!C6(+dyE3aLHVF82WdK(Xr-RfScZJ-< zsAV4$);6n1ku?9g*p}RRyXf8RcRwmpAKt%1TJ|CKuAS;KDe&8?PrJ7r0?#&eI+#;9 zj2(EOH_}SSQ~f7KUJLbp)-krf=pRT$i2X`)!}VuC%*Dc|!$GR|K7X@{o#`I%yb40G zpFtf*;BlLTN!;v1sScWx*B)`>Q-z_B^SBHNDHn^t_U}pMcGGp-l^Zn$U3e@bzI$Ca z>stbcDBt#_#pS3cA=wfBVe)HGPlQ4CEmM?z_ke&G(@cwJgK=1?7Z zu>t?P4L6zV4JECF<+BU>@49AV(;4(E&Ok6|Hca9|>pCi%HpraUlXd30i9%(H#-I|? zr`c})l=;3L(==X9qTcl1CnEI@vy%*{ZKQIfyDe(zF++V4+`sa@2KiuN&62Jist*^6 zU_;QiJ)PygP{rbOfaFL>k;{aR1QD~1;>#;3?PpxNgiz5WijvAxD4N|V{6qJFBE@wm z;ZOcHy3s=}m7p=z3ziJ9~ ze6oTM%kd6^R3M3!GUJQBW7~ZKe$qrVhJ-j5wdwqKCb`Rg!xj%Dya1oO79RyoB$58Q)wSL) z3$iR*x6^knba8iKX|LOC#%XS;&CYUT#jDVCiGHMx+!9UddU9vvb92)j0nGhu=}UQ= zu|Zn_>PLU31FllrxbY=*MUE#m>tzq$r31%+a@HIliVI9*3m&x zanz>qAT98j~GAo(siG^}VIIV*;T8%dSLo8qZNV7QUWB_VzZ~Us0FuX!R4;gbXqQ{XD8dUpWELDHC~<@ zV1MgULUlH8``muxlF{ozuq+lOJn&1^$ZI8RbtQt5J@xhY60NSt|Km2D^pGXFvrKvW ze9g3;0dm)kAwm#+YwJ>U@gMWI5SpJO(ooEZHIuw4ba3^BGNK9o$^CS9;mXSN()}F; zXS{eCNat`8=?`gw$WfgkwkzFXW}A~GPd*dfyPmHEi$XWQwcaqHLaDUE9hij&_IJUHVq=iGY)MSiv( zGB+iCOAxOBOMS@wv}T`9O#piKF!m1r$o(~ZdRnCnUMBUfyaVEWtPZ$at};IL8h;1X z^p9@J;Pli(KUVFji2}tPd9&vv9PAzn^s!QJSQF(du&Y((3txgn`k!vnNkW3xI)iTP z%+9&sn&=Ux)!|c$vhq31%b99O_II7+J!jV_R}u^*$V2oUP3b%S!kP_nU;Wv<#m?n( z^u4;K#Zu39;rH*;4jrv&W1LBe-8k_u;7ha-F-3{ui-_-N`^;cE$=@ak!yuPH9cK^f zXQxdV`TbD*HWS$rI~F@AZ?v}+hHOtb>bgleGk&w8aTV)2wlyXwYI;}vhUIkJ&adVw zt0lay*~f9q;K;Ox;#diW&2Cl7fz{!AFGB?gg6URer%JUH{d?{4kl(pT5;Fx18XOGw zk1GQVk2}$-!nr4dJ5(taOYMHW#=j3ppoym?MOlw`6D9*HJ0RaVFiRY7--){owtrNv zbr+opJ=AuNonDSYz@>-JPcx3U=Fe({)?f;oR(y-+Pwfl&ZKYRy%Hr>&&%v-Au_MrfeBhwT^HgeZ^@vlPfzSyF*7{~`zkgM)!n-7^Fhx5@>lfOUkx0~J+s?@6~ z{61S#bob_QFgE+Uty7RyM;2xKKQ>9ea|Xqy5Z29fQSdrX21r9xGK)_1bz$gtpg3V* z@oS;k9SsnHYB96fPYlkF*d_awNUOcp=+*7YzU*y28` zuv%zf32Y!7_fF57m!eN9mqSVC5%)rojAIIt-&=kZzUui=lx>u+^&9^ctJC~l5$4A~)g7`adGYa??S~P=XZYOX}F@-f( zTSIcC-VAoUHBI7D&uR3?(%KJ_AL$89+C1**2#;e%E#8Q|6Nx zc}WOg?!8|f%auB@$l+h|{WOgwYTA`4;95BF1OPsnPm64v7;s>jNoY)&4G9<~Dv&Qv 
zHCq%zqF;Sjz;z>1Xtyy0AARG3bw?{$C~Pv&aiRG>|DaP=!0%tMu5qren*nJ@!OP#r z)clXeAFhH>QTyi|*(gY=QB|1VYxcQD-f=?B`wOo~thKKMm_C;MHnEqZ;)I@6nwq`f z!65)Pm8Ka=RH7rm2)agyt_u=9;9=&c0w1c!+IpMqIDJA(hV+N_dxFv&*WvwjIs1=g z#UX?D7F`C}FwhRmEOoxW4(4s1<9rXF*M@TNAlTU%e7nkFTvX`^_aJr{#H?{hdY^V( z1+w{YnLyDb(|8L}x;QGRzz@v9djk;yr8f@wv>ShbGBmi(eB7HlP%DX;RW~Q&`Pie( z{c0T6O7cUCA5d<}8NBj&3amWe;78)$ z2u$de(&Aic`tMVo-V8-b1=qni(hNlJH~sE&=?Ck5zU8TYAyS+LIUFMT><;b+W_|== zk2Fl4&?!T=snwWUB zieH{=UREC1m+=R;BMKo0!M~Z-xPIQ!7t7OlLvnq+3VmC|$4w%~Ph}#i5IZx`$)roS z!Qrm8`@@h|O@e@?af7TpL~(`%QtV1iA*prWtzx|Qm!p|441&~di})O=amXT!Z{#La zkgXh3+}@H{@49Ul=_-@CcblhZ0&a7_J{+->0Mh6^lR(*rA7Z62)^6P1zhqSUuy~J^ zlL{K6XETt)B^6A`$-6UF%!R=Ki6xjR2&ii{R!hSfNfGd35XUX>)D}*J(=Y_yyg|`C zw%8Xs%A!+|Unl`v;j4oa2;Ew#$X^Z#f4b0|%p!pCUA-v%j-In^FGSqC#1S^Bb}su7 zTb{}QmR1%>TGh-MRe&bMi_8dj(&K5cK$wu5`$~n-qpfIN^J4SWXQ!Byg11)QNBmGw zBbCj%ivl5vI>>^LZaEdAG_P5x#DqAcR@SC{7<=@Jt&$c>i|lkn4gQfoYdRxdgu5DmVr4^PN-(g((g zgPUt5qH|0;*Wg2|Ry0Aa1c7P-c2lfD$p#kPp?5F&Okfdwu|Lo-T|#VnhxR}h=`@8v zdPmJ?;l~54&@;tNGR$9bIpNebh9lu1XU{YVR@}?0GkuRa-t2CNFJN&VQc2e5171oqp?%U{KAdsQ%@y zX9v0&Y4G7~&lB}M{t?aYE7;@5Hg@jibi1T!OiKXB5neIT*Axe#0821ROGylBV?5X3 zXuM|<_d5aSQP%I@me`l*z#K%m$YoS+K?ITWjkAK zd5z4UIGXRT_MS8!bKms+vmAm?jLWGX6?0hKZvvMC4Q_wv^}En?D)i4><-AG ztlm)0Vo4#Xw8_o3`{7GP)>+=NoZ6Xm-S!kn6OB0#L?;!AYvY}+oIXuSwY)5l%PnxD zM?7v=@_$z9$2#5)GTPaIb1X?4mTpFgxV5Y9*~81HG|-GzW>bft*u)5xZwOtQ1Kb8%Me`=}8(&=NZHL)vnKI*! zc8i6dEkD~I*%>UHhkiVe0JA81o?adk1qtm3nA@XQzVFWxxZEMnxxxdFkgcbS&kQl- z_0(-vFKt$+a>^gqDI^V0b~8)vUT0*r_Fwq4DWz-w04EC*ox~<#XKiHYt2f8yPcwZV zJ{yqv^iAT5{~YjMO8PIW+CQM2(DdJi(rFth8mbIGM9YXnf+^s|C7MOVY0eAL!L}q; z3pq&1FH-m-j~*Q|97gO-bp!;A=Stnw$JQ-UxjSC0{-!hKMpN}D;GlYA?+W!Ks*37q zM9TV&oe&bT`t-c5GjNP$mCf?+-Oi9Y7d2-H`z8d>9O9@K4$LweoXJTkj_!f-5{ zc~Sf^`WA};L(_xN&tmT%RoEef+&AeqKff)?rqQl6GTfWB&r|C{&fR1hQV2?s%*Tvq zNv8s64|<_bmQKDC07P9~TC54%geiAPpfOhkim5&mvVvPgDIOE)mA^@kzcjfPd7aE! 
z_k!f^v9?{Y@f|l~thljo$p^>gQ}~Z>8mYpZ0sBqLV4spK0ONFb-=0xVKiMOYP_r`p z6yGFUBhf@p71sCb@Y_w)Sl*aQ%GT|L%6(8y!RVjVJ~WOhG@uyyDR!7X8u_-e{c)|$ zcsJfitR>B>`*y@{I|^%5E|p;N*X20eJtZ9pbKs(9QnJdkn~kJ_o2X3bykl~CJp10+ zhG9^<+Zh%*tYhr=*}3-YA!`()rj}On{CW!9MG@H0p(P6!UlNSA`wvzkc5yn6Wb$-j zP@wDfMTsl%<@2ic#-m(e?*qE~3I3R{%6=j!Cg12I&uT7b0w+;5%NTjAMCUPUQhD_E z`KFpSx8fxq@%K@})k!9N04-<*m3;asVdsgCbWR#)Kk2JmrfdC73-uH|JzoA5hzDWvp+b6~$g-+sZQ4}g z<8y<7a6|}zoTH>l*L!5DB&5Cx6ve&(uEm^A(xMIW0*gN;f=5xF8d}6D;C)~r#H*&Q zGu34AG^i z!mmfbliqhtGpUM0292l7QH=rzx6yPu>!EXf$HHbnD>h{fhS0_F!+JyI`@58jdmM|k z%O4*kDLJs{Su!L-W{T&p&3{2H4iSX%nA5*6&zG)` z>JtHGZ=!_3sgP=ENpy|(f;Qo;ScwM;hQ&j9O?P(60Ji?LVy4}%W$qguaxET!U&1GX zUkqM!0ybPamECaCxha!|VVRL`zMm^dAxu6BOL85Ab{@OksgPHLe6ETx?|WclRf>#y zu)rdziz*i-Of_;>@Q{d(?tJN@i0Sx`)zzOvUQr6VF|rJs3CVSmfF8BK+s`W_2;}qz zwDj{g4`0qH=W(VdXITn$QxQB$!+C>e+y51m4vLCeda9_%P|z#6cif+N3A|1hJh4ON z%XnHX`car{s#4#oV&lMut?Hr5ZVgTLph_Cdwj)- zTvm&G!Akj2zI3c6_ln=~#Dj2@CMX6~tZrElQ~zR^2C4%ipnrDsotn>n<k7bmA5o zeN+M)!@ zp2N~Gz=;8UP_O}E=M7f?7nKg{K5GQz7GT5Vs4BsA=Ud(j%9x3lpr`}Cs%_MLbDi0( zvzv|vg-U4(hf(W{(KFq<%V1uvyWk{9fxi0JrBe=N!JpphaD3kEPh@x=bmL~}=sa~$ zD;OVX**ATV3bEcWQihRUQ>V0d`>lu=AUsL!SR;YZ8$F>&bhL4&<6m1TVgCHSG5qSS z(lw*(^zRB3hV z%nuOwwGIn{nCznxOEpmiV7akgC#?|VBNed!)`z@g6c9L|3J<%PP(`mQv{Rcjne~|t zu<;;8O=Wo_9XgoURFDd0`sn1Yol!7^BIBj-L8M?v75%lv(|`f?ggO9P!TbqO0dM^P zVzUS~=aBx(Z#ivgzfR3hYpg%NxX>i1+y#p%95<2k-KC(*3fx>U?&Q!f&Cg*7;rI1@$j+KQ3e+6^V1ClX2 z?C9oap&pNO3}ZS{*!1UutpP(M4fFu8^>3$USc(4q`+_K{AO$dtvQXmD`za7Bx6_?3 z4RbLL<}5?ey$X;O13*5Q*-^Y9pjaTQvHe7UpGnCdpZYMvu7VJ(_vgL1A>H|OlnZ<% zBw^nl_hiajmif|zXi3%RvdbSnptmrgtWu>`@{#vk^e+0o)X`z{rM?_!L{d@}g|pxM zp?4yj7b~thjj7B-+f5dmX1|m{r?Bd2jMoXL!>g}@x3&-W$DK~Hh4boYKtivREePDj z=d`4CY7=D|#KoYvBq07$XGapn8Nt00yubRK1Iy+i&@&hkxaa7;zJ?6ccvFe!7Z49U zm9Ub$m_WXz>W0B(jp9s$U?_wLG4z+Nm?D(1FNahzADG%b+aoQ>>?g1ivAMb469(eQ zT)V{?c|9>EOd2q`w@R<^f_U18V0ZM30_c{I7^{0`vF(}Rofxj!B2~(9fR6R}lB#oE zhI{PJGg*yf>OFdn%(d8_sq*lvtIuwU1s16O5{`%oXvOwZSVNP3O#n%1=kNJm~yg9j;2*4I- 
zxq$$1M3T-aCtq_}Z8_MpJzHZ~6PoS=yh+y9wbwCUyUC;`8=Hoa9|ZAbOh1tCoY;7z zhhN65*cU=sUMZ${s2L{KG5zN0;LEZBZcQCO0CU`O4>e*xC^$5QcT+>z7_H-&&W}weS)#3kD4@!)X+nOnw0GV z@*rt>^mt4P|JLV7IpV1hJp<1L_xdQcj51UIL&5RhVT)avLna0$x7~R??Ou1F*)ag{ z7~}B*Kud(Dl?{7dKOzj2#h(+l*Ki(>`Lpv1Db3P5o$ZdP%z@wO5B( zkp^0W=nP6@dIP7Dla!kbY8s6A{V4I}^d2gr^-MO`-t$ys zDHM{&m^M_!TS`pGlbKWOd=5a*eB>I8s9k;#fUn}S?>lwDkcHC20^n{Stian3)hO$Q zAUWh>DfoqL;M9pTV5_nJu6Af|6N(jddz#ZS5yCq@_Tfh`k?2=13QaDy9kA5xctGTv zF9y+^{8`FVJm|UFW7W5;_j)YIxh>>2{f3se1pLmAo=``V@!C+02p=l#14dzvXT0s) z&8p<(;TUnzOV+uVI5p3C=ZE+IbP4Z`eTnRx1=@=V{8^q^LZz$PPmO1K(6lsB9P$Th z3vbIlK2s{FzK!C1-pOR86;ZdZ2-)9p(v%8x^y>~j-e6@N4sY|3YKmg_AB z;x4-|j+xH+Ya?05sLC8^wu-M=kH}GNao^JBTp5^<{$hT6UxtYjdEx2M6o_z&M-D2~ zJ1#B8x-z|VR=ifz{F5o~L=FUhfMp1YEI$aQRg~Z!wj0jdwaM0ece(WE%2+5zfLAZ= zYB0$k=Ec;V5OH1|mUi3$%3SG?_*Zhogd_mlx3ROgykgaXT!_8{m5XgjHU{dP=r`BCvpW*f&1OA@BqF{5xmsq0E`s`bti!CPWQ?Y%c48w1h3MZ7#F&|EmZQ{fRo zr*8NjEjpA2@i6o0y(q6Q07agL7#4@F!=R(-fHIe(!n=kga>&g{3CNCba@Gp$aSXlT zyO|hcvipfLk2C0ynrt=jIVTIhDN0hE!YDegGn#~LucrHE$u@&ZW?w3+Oo$Me9`)-& z=H5cwVR%mdg`#9!@GpMF_Jm}b5PEEU^#yor1Y_8Mg8YYPq6U0NB(yP>N3_L3{s0qWkvk_tEVQo2mGS?1TL2&LXIHGBA7TBe_7Q=4&A89BbBQWOI%Z*HVpj3 zLoFt(L@XH_A(dQIw@}!?*d8}6m`D;Z7Q>u04}_IN5ogp*%sPQEk#*ll4NbkikD;oO zN;#^XU82S@c|X!YdVH%fWFBCwG2sNbPd0#5(S&eWK_DFeMo~6ttrJ0JS~S|JqB@vU z0dYQ(Bnp)qyq%O+9s;Db~X>p}ki< zPgJA$w}0VGOv7B{}V z^V0mb&PtdZJ$%{PSdMJ}DdD9-b}eRrLTfGeU#*ia$RxvMxO;r3Dh&1CvGZSMKY$jw zbehz=tSj8QBqt|5Yjug0s0=K*)1Dx=_sXEwMtzLcyA2<9PIou0W52r>;;P5)??(6sC69JKg(G`QiIi^KWSSTT{m_NP~n8Q_pn zIpGuUj3Sn@l{nE8yo|PIOJ>}$6lbjfy#LMfHTf1{AhD7PW|!ASPjU2Km_IkY#E(Kp z8@7MeGY-H^!X>!!g)`?uK)`Y#ZH}9v@m08vq)t-GdU7U;7GEdpHScJ{v}26b?a0}m z?v)v~)W2rBpR`hK>F@)f@C6cA1$HAFL+Mhc+&%3S zl`)NoUOaUM>B(Gs4vDdEmAHQ-`Cf^tpF*bd2hu#d=?a-c&UNgvpGFl$MU+J%{#RR{HORuZ}+L-16nTGso6;GH171^^pP%q+fX=rg(GO=~TKj zUT4-~7kbVM?$c2$6^)YqmfKS$RYfN=0XjcY?mc0=a$QR%r82=Vwd4*ATsPLX#AUc} z1Y`~7B;zZsccD1FvsfRKhGogdm=$$(GRq^%p!Z8lt9hT7s-zI;9xp!o`Kxq#>Gw$k zvtod5@otrlAPKOwxL!`r%3K*bZF&j&u30_t}v0RC_+`uI`=pqgni);P! 
z`=Z0+ zUas5~@<0f@pMGyC``Rp+c$Se$l;zD*hwbqmGCtKZxYIV>MrPnn?#J~zV*b9Xg}a(6 z0y3-yjcTpun{QXk8x~P4+GPY@PJ*9EI?iOAKPmIiZis&KJUF~oE>-#da@ij={#{KK z$L%|ALLcgM`fD`6$l%#d-9bWM!>1xuo$jrx)1}e$kJM-}uf3d%%g~%l=L?nQ`__jW z!x$0!s5O4S%l(h5fVPGWJ}cBplm4=z81?<8%l#4{5|7Lhd3N5L@MR0c&a5x>H7j&# z;#9@aiPJY}_r@PmGPy>5=F&>3)O&|R+eqeKd-)z~(|g*`Q|pn&b7HpbEjBjm&!-)K zuB5+2o##OT~x!A%uFHBj#juM^X+M!ZP6Ys&bF@79cXZuZ!45FA7L*%*| zv^%|fF&y}nlF?%eLhnYUK|$ZCw}`Roeo5&zOj(ODMwXUhvN`qjL7uoaV|UEM%+O^a zl>JcUi*|NsXmY*@ruBiU__rToBu4VEA3)v4z2Xl<@ad&pKx0#UqWt9-Aa$vMp5aaD zaSvK`#!AdRrw`AU?&9<(N5*|S`YfMKrg6aF(pZ0Z)@a<}U)q_@V_oXE3#WwyaFh0t zZ|u?;lUpd7WC1i7?q2D2(9dA*J|4Fd`Q&W}>+WKkP^lUBtDm`25s$tcJ4SoegvQO|I(-OEbPw2Qfg3d;apmH=R-wvh5{~;fz$l5KS8nh2j zwCyyYf^0HHibrvQU8-uWkkf;mt#p1RNAb0u7pin-MgxRa!7$6&Bs4LqjDWc!wQ~p&1f#!tafg2{D92D$X)mWDecTW~uNb8ir7O#`wRKrx)b@B~ zYoVoKPSxch2_u(($6gr6S;fdt|5Kr7L$59mbZoy0x_!@KAJ;1hncr9H%ulr%P2Py2 zBlw4VIZDJ;a_0W=cCF96(0=6xvP*{^{`NOmzwBbfPs`7I%|4fE)pOc*vn~Y>TrJ&P z=!J-8sna16(r{x6X0JtljM1FTs(yTROjmd>@{~-^R3&X}W~&Q^xR3SEh-eK+QNC|V z$}7thPk$~|vU3_TlFZ5tf_Uc z*ow>UcOoi=LKeBw4fvvBi{QLd18;g{{!5S!!Y`Cg+kVY|6HPTyZY9CxDXj252mZ(A z;^6@KHIuI&cmBE3|DKy>#^qu@+mvaF{B&8rxqrQ`B)ZTZ?l$_Iib)_$U|(i~)NPA* z1;~&>H}Hb?Le>atMbr%|TNu9kzf(_6?e%rvQYQZLkuP3c03kE57Nla-zO&`8IT$G>@+N85sZI-9!3x~yNoEONqzggebOx@u&yFe znWt{GbqCNd=Iyhd}70R9xe(DKDvocvR->60gAdbNcgo<^uc?Bjlf%uf}Xo&b9h}W zFC^mk$2{qW#==!CKs0~q>H%~wlAq-8t`A5!Nem{}iTNd@C)%R2-Cg+y@K?p6)&;me z3?h?0zNBpUi2H$nzKlBI2JG8c3=}&GG3vVHcRHJ)keG65(Re3-mgtHNeWeBt1RMhN zx&SJYExQBJ?&!+S;(RYOjC7Wumh;%28P784aiX%_RaUpZ+NJuPIOcIJfUtaT;&=FJ zsg@z1`um?S5+hv*!JL!QcGu6liON#Ct`5h8ToROH_i?5~cDF|hv#Qz1(TSNg+g$VAVYS*$_ z+OVW|pg#E%$iIos>E2!o;M@b_!h{umsY>dGK7(5|nn%etz+W`tFkt){p}bta=}>gf zN9mP)!Fv$4_&bPy1zr z$*By?X64Edp=TlI8ZZ36RC(>22@wbfcUkXnM9R-{jG9dg4EIkPxGGm^4pN9YuZDVU zmJ|(ycL&#F1+M{?s~1G_#Jpc8-^Zo=8UR>Xm%XMkXY?e|q9CeK$Y+d@9Suk zYcw4a!tf|8`ney_`1YoDTv}e_{sJ2GxvX-*r!$;EGfCVlm~Ss`^?xmNXcLy(&k5}K z4y0mmdh5`81{niAj%{uMkJ8){l~e*osGPZU%5+y{*W_UpX3b2hyCeQoh)4})a_8Yxv 
z+UbTOUy7WhJpe}rwo8p%`tPkO8lNYW@>w+wI7x1rgnJO+7?QA0m+48>G%Eht{n9YH zZsQxliDxOHJp0ECAkCqo3(W+m8zEr#%1S5Ul0GMux8yD%KVA8Gzt3 zJX*)-*VLf|^Wug} zjxe-r-_G~>V$ts3;PhI&8#{_A(SiopEV0P%g>3A+8m>_78-u3Ljdf*fv7nAXo1M-G+F%Pv&I?{UhNuY=U@sqvsN zGNn13|Hl}QmxQ=6)k<&f1Z0xZcQ44kxqUU7r3Z*MzmA3PIQk#np}tY^XD>pG*NXJv z6~;2qI%1W#>5a(Yuc_i%raa5A|LZe!Gzc?`>|iD;d~98wIQW`nJI{c`I;D)ls(#aX z#)RyliFIP)Z-N)yLpO~FTahUmf#Dvi(~yrMcqaaT&Zj#nqW!=@fl2?SyGo)`F?}7E z>l`q8M}eTidH419h+AfTanvWaUeFNPFG)l_$M2K)dJOUXR!OYAJ}PIglgf70~^OsN!N#n00R z<}X^!rndK(FD(W1a+A$0A4jjVRJLkZB2zs4SW`op2agOl-7bU_<%*w7R~RN26uhmj z&9EFj^$EV0sjb)Tk-R>rcl?I(s&C7(Dzj)QV(*p7fvosqOO+)^#EMJv!Y7*V&?PT> z7TLMEp`U8FOEw|CUzI!h4f44!0neEAmK&S{33jat=Q4Yck(ML!KEX>ajr`WfYTGg0 zRgOV#wy*qC&_^)hz>T^Ts_u27`>L?&u2^M6i(a9t`}76phR?L{2Wnu zc)nFhvZra#?(5x8x|~a-Yf67kmrJ!T$im9^xo9hqaa~0CGp^aXJQW;d5mJG->99k| z;*lG5`Dl71RUp683-N$8=@(+&2|x00j(aw6FKNF7wmHiGW_AcQvn|!a(-j^CaXRI`?&& zpnIv|&5jkNO#bPM-4<1o;1FAR(jA9m$&FO#b;#F7>4>HI1bhASIDB@UK)VX_ zLFn8ddrKLWP|tvyiWSQP>Tvw#LW)2MdhsN=s(F(PI zE+c?tDe_bc{{W{>69ZEaZ)2!bD0#(vsNB4bGWCU}kL3~SN_|l^B8ACeRLd^pCXzZ6 zl8I}d+MbAn6hB%m-MWo*`$L@Zc>0I&c`O=HEe9D)u#?zA#Rkb#m?yA+S;d%-Q zqJw-Xkh}7U3?}72%jmgHG^EJ;AFDQ&S`BAtO$}``E9u%y7RQSaLNBt1I+xA+ZobyP&uRq1U1(c4gudI_8bTgQG z?_4OYpvD-yO{&xD3@jTlZfCtZrqQ;vrG3k4AJCtnk}cXcz|EB^%&^J9rQYZBU^YN6 zo0FJc(e6H7?{IX>9jmFK=Wq?h9Ly&UWwoyoqb1C>TRiMPKUT6Zy^R${J9+oPC-Z7d z-fp?Hn4o@j@T~7s<*z2%O?M8kfeFR>4!WNwt{EscCK z`-Ni{i37oBK?WbR*lL)UC=FnZ>lm%l@>;(BxG5_eNdL2M%xCm=H|=E;y-KEyF|YXt zoHu5Z-i}eftl9PJm5_Fc z-pr~5r`6Ey&qUT~w8t2pFm#|S>M;l18RpjDB*w`67>@t>r)*}^3Af0#h2i+9d8Sq7OvuP#e?yk~?$#tw3H=@Lhy| zFXe~O(y!S0;q9q|sM=|q2;}6J@t1mg^a_{5`~xlaT_LwB^#=pH-#H#;j_DASsP++F z!-A8uD#i*n9=AWxX5vj;^~Swfm!lkfdhYZjd&D5~cPV206F?f>_~@nTJbn{@e`uih zjpiUM2Gw;TXSwa=Q4i#gwrYPQ6-|cEwC_u%q5Whq!62DwAmPpO_++Fuq3L<|Ala=L z*u=$>?_%$Ntm%%QiEU4V`C}i^R+O=Tb8BVn@AAxx|1z+k0^xlMPQle13}A2QEi_EL zD=jP(1hiYt)tQ><+3^qmq!L?c1s1Fq`J!1Mm~|!9)7|-O)Z$T|!5X#{>~KCJi~YtP zqfBnX9`kDkIz4ZFc>WWB$qP)c!tM}FQKldZZwbTGE(rrW5;H|!k^`(rJb){J(a*Tx z7D 
zW*&;IK(QL2=_ZSBR7>*-mx#aW8HW{soR-(`V}x-sd`YatP^$6Hc?~mfmgfWNk37s- zX;Q!k;_kT3UmoAK_CoL^#hs1>S5hcx96+P}GEQ3$u`iT{&Zd-?+}07VoPJzxjJ(!2 z*)#%2w|`)tm9s!u?=gcy_w^ck13*@Zp(6);Ubn>=p8)A<&>u>j8pb8lWh*u|y*)^8 zjJJv?<0$(8PiD2tLCN{;hwAnRY;Mqp7tcAi8W-YFdZex0~T)uiY z7l}iFYkvoRZzPFr(H3Y(tTobDM_5K2=M4m?wmD@ULbP98pQXJ8nZEJCSnC1v+&G(b z^Ko&Wk{379;+j-_FGTYAK0=yXqVxZ>_m*v0Hc|UGARs9v-Q5k+4bn&|AxL*fcXxwy zcS<*ibT^0~-Hp;6K675zb>IJQ@VvTTvTY~B$sEVbtXXT{f2&V>_I_N^C*`IxgALIJ zg*#z43Z=z?th8NWD%1W;Od-Y}oEwCd-!H`DNUVjIXX8yY6_7B&l6r7p3QnRjEy{;2+g2c!z~L7(qY zZV=4_48}!!yaTKQ>urA3gk=%iqr}rsoiPwL3mjZ z0Vj_~O&*)Yv-#%>w2-%oXrqQcjALK(ybcn3s4Qg+INU`1zj_(;^X@F}b*FKJ9s?vm zTAgr`c+sENG5XU}GWyt|?NsJ1Z-HGOy+DnoEIbd0ZP+~-2kV{#fXDR}t9{7fF78}F8XCigfjR}H8lV54VWT|)Aef*^@OH<=^HH}W4 zKfLgPbD->EXJ|&|gO-o~fvslOVqoOO4r1ml3^uxT_^AD4{LPQ{*iOvUTWmyimE5s? zvaUesPK*&kiY7O+Hca5fA{bn40Z2^}X>IkrZ>w1uZiYYAU! z)%dw6JdSuqz%4u^P9#cUuaDzJHkpji&!onbVga*1L zMe{i4OA498EU7%zlK5zE^We-yxv1ENHV5??M!ZQ|Thev^wO48lux!_to+=GR?5>zH zJRqHU@JD2_G4vE3HFPOZYn3gpHRv?_r3Rl(?p{D6Ry^%_2$^4tzW0gnKo5ROacs60!nVjNtMz-|6;MCTrV zRdkj6`sv{Y7r;>vh!_T#QBWn2l8&G+ItQapASJ%B>y*YOl4rxy_3pBm?_v1;V1?BG z&FoZ^*RcylFtnCk*qXLmYOpByqZ?M-y>KzjX@+e2F=A@DkVVLklh;XWT@%I94w8+k{M7ACL^f#l|E7wu(@0STS-T9bvD%;-t< zMf6nd^!^<_K+C{!KAgpfCg7G+SChnu188ixl z&3*=bLm>(^qrh-ay1P~i)WukVT ziRJ1c6|26Nlq17Oi^(+#ip__o7|xpf+XWihOsD72->w*A=%ef}(Z} z5&6z#Nm^1Ce1587?DfUQ=t4$%zbhUP_jiEMPTfi99Sx*xD4I5?hgLgofk8KIjFd#~ z;)PWAMO*9^!6;vzeY{9VO`l<24LG8&Vy}gRq;406)x#qZf?g|#B3ogmSG;K6K+kOz zAc`5I{sflM?$=I$FH47&w;HKnM39UeZ(jU<^33^jpN-i+G3=`$4{B5M4r!gn?E=pXvmyP~Q> z7Xs*oY<7jGL4*?(W5eQ*jJD7qtWU4LYnU1` zBf~crucbZcNk_(!i8#K3|L5ZyW(SxONj;coOuV)Sd$|_@00`?AR3l|jLmUZ!Py0XiDrde!DmJQX5*V6WwuIy zAJ+@hZdjQM*YKF#58^SPT+`!bmV$#hHUGzy?Lz#`qa=(TkQN;TV@?jkL0d97PLEqt zA+}&@wmFsY#}u(bD!>eo)7qrnttrrNcF|bYNJ6owAmMJBhHQ{u*w~`I-cy8p3RLSG z(`SIps3n>IB|M@!#Po#Ze|C9{m0Qg5;5~-(#ZL*U`}fXFCW2CIOFrcPN&^3g7Cvis z78cBF2FICcYZ^pNSQ3wY@_qxb@-1^9B8XMQsqNoc*pp7=WaEBX+|VPhY~rSICNQ(=rme0EZ!pmzMHHkL$=chP=}0eK 
zWpXJ%HT!)~B&`6VOr_!+_nTRDeotZFoE=T+(R$(4i`j2BiwziH+iA>*vH_r}i&?Lx zBv^6C6qwVcsU#yRtEKI%nk=2mK_%E1vV{-xzNWe0$Ncq&4Lop^K#GeYIPhFr_f6k$ zNW)$Z^MVL=TxE1Y9TOo3;+EkY*NB-xNu|Xi8((WB*etJgsMr$C9{2_mS4*rkFsvC` z#}^LyC(c;(;bz|YrH?dR;T_4t{6vHlM2SHJ#X-A8N3cu|?F-WHmxIwCo=Bm~8ixx8 zLefXPo=~J1SHY4yf{*^mj^HEJKNjhWkZZ#T6C96tn;~R>o24e~AS586O35}8D4+q$&a!7r z1Q(W0eK$X2VF(4i1(cFC81Wcn@_>#OmIgS-^n>muMAM-yU>=Q$rXI6a)&hx+%*h8m z`5*3pG87X8hmDMXK^8!ph|fWC-6Sp?C~f{1`I3vEtX8J{ITSXEk_~c6D*;R`wN!Pk z770i>Mgl+6pvo~$o6yo4DHNJJDOj-XR5r%BA$Uv3Td3TMR^_+Azh&%6PzDEKULW9Ln5>ZAEQb&aqt`yonC)Uvw#6qEal$?EozM3#fjJ*gb|2 z@YpiNkO*0t)d4NtbgQG;U;J}95VKA~mRwW>?HADo@QtF9Ssf>Z{VjeJ(;J)vq+Bjr z+7pWzGU`8Sm#p}Ka+yk0?l-_U$epxNi^53@c7EDH&s5{y;Zb~={nL%!eAxt=4dV$2 ze`*BibLyBVvPB^Yiee7ERVHRa*h{iqAoQgT9=3L3)QN{|Tg9dI3KiwEi^@h=J}1g* z_`*NP_(p1D1bJ5j<52#BkY+vt0Ol6(DHVW&Hx$p6!)t6T9##%AI2DPHEX*(l7@Y5# ziBAajbd%e72wlS<2(cUpi0{XUf#UgpTovTd;Ivl1?-_`6kQB`c3uF^}@$%J}$`RPa z*h0Ys?p{hUSW%4oaY4Ub^5*iD9-Wu}1Gce`F|MqA`68iM_W5RGm@-|Az}oly%ceWT zI%~;*6*QS2O+5vay&619eJXTdWrhP#8Ni1v(5qY8l-7oqyelh<3a!_n2V)7uxbldf zk?7PJhvRXX$LZ3|AH}2y$x}k}fGht>>1nDN%sKh!%L+4N|96GT_;`8MVCx&~Cpzgs z4{@2!`TJ-#oh$YtiS)yNe+eR+V!vccg?+@xy;~yX(MuwRx$nz>egXekD)ZQhS?7x( zWPV#N6iVCvnO|Q!Fu$yhd~+%cgP!fjS2LN#wgZ{72cL6bzkpB#%wigymay@ipJK59 zz52kQZj5u{&>0D->vyV>GKk?iA29Fk>6d8%^dbz*8KS>gEfqREbb4oR?vUV&c$dXt zF@WY zGTuWC$;*%dyAB;CeCE~hjzluruWiqCagzpba3afXxWy(na=My9Ii|N|1>L4WqMiiT zbMG#KI8i!S^XFG^l$`8QPWri7{)ymNsmF9tO^L@NOX(mEx+Y;WH>D zr4b$RcAZt#NOo{~T$;0cUR!@7f@g~*;sMdh8i?p1_ERxY$TV`Z{$Cjr2 zsYVmBX}}cW$_a+Ts>*<6jU*7Q(S#`4TNR4Xwh=cv>BHMySfWIRLwWflQQi1WEsF_7Jx46TbIIc|H39f;U6pO$eCf z@GT+UNkcx~CmO)=VAKXevtuU`@aWirz+n*L1p8GCBF8M|kZT(gV1lvr0nYB;{829% zGHu9FIm&aCjmf+{xLQHyxv=| zDfaRQ{UrAX#%DE%@tHVCG?qjtkqxnP7?@GuhGXV|Nuy5zXj8|!w1^O?6u~^q2z8xj z!2&j3KF!9uz^$TTCPZ9V7z*~l%S&u4ImnbM;}gi4zK(;U0c8MQm7M_Rwxjv#YGGjC zk|868q=5=@H03c7vY>#*GoGUt39%hi0^qtwK@pItMaeZ7-JKs1PuwrT0p%d9;k|^C z`Z`r`!*wotX6aVt30PzcL?^4MN&Lv1ZLFP-4m&)~;WEqRR{>xahTbzYT~(wLMT% 
z2s11YYJAOJ*%Y6Q?2{hb89X9qTP?#Y+Kpelx3M7U4JO0#TMRa>|7S$-wx!TII?{ra zaB2Yl%B0{03hhQq!KMnP3^w$DiHwG8zs`c|n)e3?|3WPflq~7p^+0j?c8X4RD2la6 zIXu}##EJI~D%(50pn9t5fSv^O-&$?CXG_%kqzs?Ig6@{oo*9<>` zs-dL!E}#I4dLT}PLhiKk6y>C6$WL1mN}Eh&59~44k2o_W<4#I- zXT%C(M$pw#oa!=kkckUQDFFzNm%|hZ^M=d&Jybm}RQ4x~Kh&%6L1L`ItdpVxG15A( zE{{CxT|Rekv+WQ)+bO%(bg}dyS5Qi2%exO3gkZxgVYu8&+8#E`+AsW-Wy`p%G{i^8 z94}0tua9=c=3@@G=G{;H1_-U3woHN{ftC(-XIjiw$M`fb#;0Y(NrJaW(CD?v#)lsN^F!iA8(_xiPTSI4=SO(>kBp+^1a6 zbBu7mt^*W_3UMymKcO23_6_AAJP?ubM>_yy`EhQ3gv6lJ{ z-6@)GFCwT>wr5XVsEU)ZiklQI z4gl+HQQ^yi%3`(Dpruie;H0;fj?8LJ@cWMib5JTtO)v;dyaL40hRlpKaWC^QZGz^e zfmY5bDUwudVD!*--e@wcpKN@z$vT)aEW0l5Ex%U!hf$FZi*$ESRzV)Y#G z%MKtxl6?<}yt5}p&t-vQ5NT2Ck8@KrEE34hGi`UjGeL;LW6vMM7gs;@eg8%xTc66m z0y)Wzo&k5Ch-L*qLGczu>@|?;H@gq^rwd%-^kmUE&K8Z3|PWA{t(mYipswrFGlA0{N9vWHSK?wMw6Oh&nb~4*fCeOb> zYyvmuk^-!&?sIc;ILlu|^}sBQSxvq)=OM}OA6s?5p+)xNzDjvdp)O~j`o^O_0^4D} z#imx42SmBtRaN%t<|B#pkn^OR4(Qb3fB{8wFgBxc@;RJ=nEpEPR{MpL&+x zOQOM|qgHQ2OfZieHnnRQ#AiX?{UdmXWXOE;ueU$!M=P*6c1Wf_8X2$1ki7+`uu)J} zQU3DSYCjjBmL<>9X0#}Kp6bmv$6e#Pg$DpRVlMsjq2oL$VAA5AELg(&JJndv$#g;J zuLg?*e&U{OVEUUi`vZIkAd3X3LVx1O>9LT*bd88vFE9d zP0bl0!XxLsW1hgX-}IRm?0$6%6#9Io8%I6cz!pNUkW4)Y7`DEU$*G={u&QNIeqYCA zWR9)>b^yyy`L37rdCF0U2Z(#`mU-XxS=$K>LGD)sW@mZH)NsQP6Ftq5VOp}2>~bi_`i!Mlo}=-E@Lrppy@S%wWK*oobesS} z8a%{)*BuY+q`w#!_BDcQ+Qw*k^N<0wpcaZ`LGL>wh6*BV{sOk5g`2O|O`RuhF zFKuW6H89FO|B-ij19;iT?_hhh+4sx1Y_t1nlxg^%dJ z#^g%!eX7rV5j`#^I2OxEz>fH#GSvpj^z%>jEUenyF>#WW4?xY3=VGjlbI69T)i5so|b)!jQx_X5OZx z`GUFrku=RCxhh(S7NZl6&Sak;?r?9?nl)*(KtVR%Bc_w&q)Cv9yki^$t`1>f-LBu;J}3v9zzZ@ixm2S56iHfqM`0r`7Legk$l`A-g?f2|64V@U&}uT8 zT)Pr`_U#U$63tlrP^qnvdp3tCH%{GR zU^=z`X@xisC%Nor_l;hhV1RV&6i~8j=uA1JztbwF_aVS|m=)WT*0-n0bfqJxm{i&HvN-w*(df=b_NevRXVm8xyk$ zwGr|5h7~+&tm75!p5|cd)DVR)Yx-l$pa7RPNWn;PGs>Mh%3GNiMi5xu4)1q@l0d8F z^*RoR+v)l*g$09x9y`<3cK7QXHBVc_NvT-j>fP_G*zvcA z@}K@KMiES4ZLdd-h(;HcV|X_JE`oM<;u_ZN1=;J$76PN}&@ZwIa-P`q=th4gtl?sI zeV^uYIpaIJ=~M005zMHglEl4sa}fpwDWy+1Hi#rhw&_UAg`1cZks7bibY{EVU-b8) 
zITwp4vO6sB=^cs%*h48s&u?F|y(OWEa&CYYr-ThO>>R8Kf`6=fM2MTP>SDGQo45M3 z`&;lbZAo5wHK@$+=EbAwzN_7JbpTA!!V^}hiyL}NTGLrC%279VX~1HCOZe*_x%sFq z3mhgHqO~s(gEAe`IRwp@gSd!#Tf|q3sb@!j-yo8+>_;YM=x@Hp`R?=T&_$oX73(PQ z+2#7>J=^4skYMNX*fV-f+86|NJLZGJ=l;OxyQ_98G_^ZBE# zZQWFL`nCSvA~GktuvT*Bh<&JE`cANwQpN=xrL&j%>X1aWP0~jPVo^w%;<)|DLz>By z7UoXL?snR*&W(^<-TCa%h>L=de#;_19D6;YIFyX`iC%I>A@z~Y|GlE`*`RPR#mQ5^ z8obYigSC;o&+46&I}IG5Sq&sMtNrRhWw1ii%yse{a_A9N2psb>T)tn6+@0_%f2ayj z(+OXD!^?{m^!f=p(p%8U-YysYpfG9KY%kKc_F17S4L6Nysblxw z`469a>bnkJN#$`^9u{4ch-#(BPOtGRe!R8ckWLQWeqHEJIc!@k5G;%E&fRgqmBUY; zIau!TPth9=*3Q2T(Jm$M_&rn)44HaP!pP_6`S`ECe|F3h#b=rP<>1)ydsijbVO8dQ zh^z@ZMOjQq45^1IW`|u2%AYLX%A&F{+i{eV*{F*zgdQLc94E~Dy%vxFaV_BCNsh~@O2YSJ^`@F)T5W?>{aG8PQYLaOo6}1{R z#_<-&9U`mHE zAV4WhYh~n^joW*e_w7J|PzrMZU!*MAA>q&A(6{k={W2qHpcq^OHjK05KW6@cYL73h zHp>x~mi3gY!A-S%I1IG5{!;a4P73%Dul|`zY89Uj!7yZr&MYg?I zf~#QWi=+U;S?1>D{ByrtNdwglWr5U+TK#UAEBC2fF)nXLf`ebvit3+@hK`ImtG~mI zGsEJffl?CX=T3dO7^r3LLLJPpYLJ1Ksp8;cZUqTCI9uggT{fwAAOp9L|B}T3yaI-R z2;w5#A_hTty}G3?J*AjQX1WDQJn=Au5II`XR&smdtBo41@p9|Us(QetbXh1xb2M5#^ZOG$*&`p7 zgXp-F4$I!^7eprCaot#LupM-MhD1Q{J&6%-|n#bmK_H2X8{^NM5F*C&U(%M9+dhct3Uo|cfaU^ zP(UEM*R%bC*a!f5|0R5qsHRkFlueN{>HWvC3}G+$0r3x`19ld;Z}hKisCk6auc5f~{eGA=C|!GG%wcQ0e9qKwm+&>bc)C`kJTZtLL6(wjnmc~Q2RpF&|; z*L9_+!volU`hX7)LgRhj5_y)0A{KbhW&LX^=JyiFz00xiY<|?0m;E%d-xegCI9-dH zcmYVeKd*|-X+aAdOQ5EUo_ap*jp2}GE@Ils3nlsTZJ9t^A7Jv!X#i^q>whc-&?G;mkJwRlZ4t!GZ}{AmPiQR)%?d!A&#?fX(jHZ!1E41aVCmO0Q7hd#CI zaT-9(J%b4MkyxyEyTKxkT+V8V0ZYe^T4v#oZFM=kPOnF6B)87jG>bJ8K zn9u52Op5YJJ2@IxBaGE>-n`nCYU-A4!r`{&E6y5y}(x1tA&IeCqwB4_8n9469=M_&^+7^{ zxe-KWcDoBBhj#E&%;wHNWB%LnE0~37ab+`9xMqkSR~h!nz{og9gytl>@c) z_lR?ZE#pMyzfAS6cv>8>ey~jS35Qstm6ZCn&n(S7CF)ww3FRbslymZ-)nh(?AJ#r- z!ASiqluhu`CEb<6+j3|(8U;O6tTZV4@gHcYtkU!}XqTG4Ffn|2dpX~B{!B~~}$46X%oWCNWK^fZS8XVrTw=?(yfglNKbz3b! 
z?0)+{zou}I_Nd1oBJX8;R?Rbs3!Q-cl*Ziw&qwDtfj7{pr)0-cS(n075MjB+Zi=(Z^UoseR-z6^*w)#q#*p-~w$BsED)pTZV`A#JYQjw+jvoAj@F zf@G_OQ;dSl)=K(7(J=u z+o9FoEYdFvjqQmblq~@kpbR*TQ_E|}PQ|PTc-nm* z`H&cX-=bfo&cn%8$WZG#6a5@ImVWA2$5s7eU89!Uhs5s#x;y-9Crv}-`yv6+BUkAQ z$^}y-5+fz%>7%e>-8v5$2mIY$vg>39hDZ_t{rZXzwakWs6n-qG{*>hKqz!Lq%(YcW z$9fX!<$h-{Q6lT;*35Eqh%FT(U$w1caeeBXmlzGBJ}~}r#;{}7%)D8;X<`B58<$gu zX2&nWIQ^-Eb}v;3a^uGe$mk!`ao>Uh7&97e+fQBDp3|H@JFJcqExG%r0X|A$`QHlp z%&+MZa`g%op8P8XW^wt1l6$9?z4ipw=yh;1yD`wUKizGYh&+{f({6z(gib;P38{&9Y@=3&yoTp^%&Bb6*eIXb+{2ohZ?C#`TIz= zLBX*=!02e=o(HGkNY>k#g_L5ggy-8qk;U#o6QRJ=Z}BgKDs$b3+EN>W^Wm|eXI$&?I2ySy1^t_uTSl01kpZjRDRH;5;{;DDtB_jmg_kz31xbO8`mIjM;X{ zBg1s=;W8U!3Y4C#ZltgrS%1vHU0_R7GkS@Ik=6szAQHvoZNvCtr-8&ei)-}SCV9c6 z?GtF3@IlAZ$g5vVYz=muH53=#oSEFZ>-cLYy8E~$u3Nhv)=zi0AqbJ3;Q?asa``q3 zZS}JFGwkl)X5R;%f`$_h;=n%=siM9vM{|l6lx4&8N*b2mB62V$d4OJx8d#$x8!&Er zGd8PUqlqPBCW8A(F04ema5U${^Pcas^#0d7eU}^1^eig5(d;Lfm?R z;FRahM8O~FgA|PFuMiZSGmYS8Jk)AfUbJK?b|GMqM(&KJETK`wYV~;#7#eVZh(sJA z2&1euNNf2n-wBw|%^5MsTuT;~v>#Sp`3CRcRXTUTk<89T&_T<|{FwnbI^5T<)JX58 zEh(n$?v2X=67KT+{77xQ#Nf$&d7bv-YTk3HwZgNzQewe!g}Thh<5fTa^uBi~zO~%f zsX=$342Vn=cm++~`sdq;@EM4pOWV?FjJ}xk!IBvK+-RubFcjXvECp()7W|ORCDP~9 zm(xJs;9SrZ{es|?dD%={F7<^&y@G^}LM0_vN*gIN4ARk2d4{H4OL`Zps*<4iHjF|S zr0);x<~g%&n|dZNp3=PkZk@HxgFG>^Tm|k6{q~9MAy%W z)M=F(y8$ZD04blR9r8NeM~&9WU<0-IGejg8YGmsGG3{C+CqQ~2u9ybtca6gkjBpis zTpcbPB~UJ7{CytoIznRKxhLQZ0EKDm&>g5Xurn__htl-DXa3;YS2)`kr&)!%cmu@4 z$=_!8Pk>^Nc?!o|f11>;{Ve1=6f~E@el;6rPu9-H4J>oEtn8;cTnzDm_T}Ec7=Ofe zDA+`|m_PjRe>20Z>%Nn8$mSzKL(?W6JkR)=uGc8-E_2oQulE_aOK1(rV!QEno2hnM zybe4Ca~cWgvA7TX$y;@+wXc(V#mN#ydHGz9Q-9T)#ip_u1bjxmwZ7k`OWkqu$Mi`$ z%~CL$p_Fd6#=jy& z&P%ReJ0AnqIaw8dgQlYqC%q9) zT}nS}<;&XyKzhBIVWOP0R|;d$kTs8Gk0ka*hpj=H0&QbP*C&f|wbrPG2uX~v)5X&Q z`q)Bze*@`P#v# zN1C$K?MPRdb}{fbBkx6N-eox=i-C%obH3eL$3(T^Q^lkMx2`-6BOYn6tm7N5_^Ho# zsJ#aH#iBRTfyp!nxa6cOTOP91SWjdhDT2`NaS6;WdAwP4YiWE=!Nr70^UbWCh#1{d zH=2)rCy|GU|Ub(G(fADX;>tDfyAJ@<7a zs@>hzxDRZ!6wkUwy6VZG025{QthUx1pBfYxn(5};v^{QdF5iYJw)m&%Y*}JLB9Mdf 
zcZm={{Hsvl;JB;;I`pG!`x-m+Q#}pLWtk?}D2laM502Ys-DIQFCv00N@UhfF$^Dy= zsx`EgaNQ+0C-ZTsn~60wSx*37^DF7sOM|vIqh6U)5`4GdHda2HTkem#FTvk1CcNlf z3^D^99p2>^_oIr!Yyo{7Vo~Bn0|zuIm3L_ycB{nvS=Q)X4q~&opS-VrwowPqeQeqG zRSjHn+i=;iQf?Y`9BnO1a?(C+&OC#`Z-2U8+7gP~Dq@lnGwF_@sHb6|Yp>+POPqG= zPL&NjH1kf*%aYdW`>cNx*>qE( zikYF^oo31OU4|JBzkG*^Nd23G{GF_g-=2S3OL|6!x8+TpIFSIKyNSE^%$pU-);YTr;2+;-1+Di8$+N@CZzF27oZ3`bMkn zU^{3wHSNv--ibmY9S;c)1@Dz^p%aX@(gyLtwR7J)TN9y9mX`5^J&uvbIMh+DATm_u zRB36OEYIh=z1aC6Dv>5pV~(Gui2pE%Dm9fNDkaU*!Z?}VWujEv`j;fQ?t_j%_@k#F zGxNUhtH{G~{SOx1`yvTAQ(#ZMew&k8&v`=5T7!AZ`=3w9o}i((4CVGD47rFB8*q zu3S)aJV_T+9VM=*c9A&ujFvT%J`L95ns$fIFdd5HJV$WM#rwMRMR@<`wSW*&iN7gG zd<2OG<}L@kpb%H0qFLCUya6yoM8QR*yI&|#e#Z86bUBk4;MrFledAq_D?~1j+)L9j z8cR6+GmSoYa{}thA0MGPTR50642oUgF)OnX=(o`IrXF5|q%9WI4BhVkGKwWm*;;QU zS^=G!3TU(43}3g$7qD>&BS}mbtg8?Oo20K_`A@DMo#PxQ z-@I28I%a%bhXfKYGcmD9HksrMg_7~5@%5S*@B+5Tu~KV@;Ch$qSq{hk@SzD^PThLp z$k4Fg1@op0tL?YWTlHgzEc|4%&6(6({2qRT6m8G!!jGci+q#Vg={r#WO!a`ClCCMu zp>$j@CXmobsE9d(g=NKiorULNib>6$h2j}HT@<0|W_rMRJofn()UFw6FusFMGRW_` z(ysUr3Bx_T>X|SS1RoTv^9Zman==-vPpe((Ih)@g)F^YY3WZaQAmzVaW^Ekh-=>481=I54`}L#Hx#JMjL-V zntgu;>4-{HTvO2|AtU$qe1dk&(Cy}Umh96q{b}WA->PDc;l)LTf08Z@vM)r52wxD_ z)dZpYwqv{T~pU}uSOZM$dapiM2 z$|Xp+`FOE>W9y&(`o@_}Ue9}ur+%mzX^(e1D-WkEZpPrm1mz_Mm*SSJ(c;`%j_>or zH+EU6lvhs{3cG6Sh$k{B!*W8yy7GVqU>1Wag(6KrAeiK|jK|RTYnyZJjBrn1J=st} z^^>I07Y|9Jx}}*0h~g9;(uG5$`uruo9{U-KA}0;s9*`QJ&89UUR*;s}9oNs~j;Tvo+u*6)^ZD%AzkxxK=-;0DL=`_7Z&06l{bz+x#A6vIZNKsr6cl>0 ztfZK_KO}F^!O!h-$oe$J;1A?J^LMo`kh9`6-Z4G&^CvKsPN<6gm;r&#Asa)_iyqiK zixQV-A>_)z#DR{16G7W7Klq(Fe3vg0ZCy-Bv%WoSEf&Hs>c?n;r=s5R@Fvj|8E9@% zh(}}_8k97n4ZP5Yh%Utu^xQ6+t|Uv;ekEDoCyYD*tZi#BNoX^V3Z8Flu?lk0wkLA9 zFn}=nEcYg|j82~EOEOj24{WlXqcZMnc6<+$T5|dH9>{~Jt%K+D=Pi|4%aiD$ti&6SA=~vZsmKA#0T2!?TuIcyf`bYBO(c(MmOg6snh8DJISQb#b%=5 zx8e}cG~n9}loN&S;R7I#Jt|L2D4Xyn6N}*~T)@lP>3e7N+XITFS;FESO#S&J&Ardt zzG|JYJwa_4SF*Fn0bQXNHFyTf~_}@Qy=oCjIvW;>_Q+fgo%$ts`PoA`* zOeSr0Vg#aV-#)`3bafmZSi&Dnxk7JN2Zi_99}}f?S!KQ 
zt-Z;R`hnV6w4lYB4hxhuWnB6G&Rx^-o1VB@Za4lrX*3gB&86|gUzsh6#MsyZ2dOkz zU4;0Ijvj>ozB{;$_4Gyn#?5UXs;_T?%L!fK2Vz&~Jvj|cj)lTof>9C7`gA1@qh!vI zXp2@@H&(-p;VdoJnY5LaBT+Pyn<$-DiTY+pZnwfhM!QvZBdvz34*(!FM1o1G2&C9a z%@Gi`W04(y&_SAd^bd7fi=hFYFKhJ9et+G;yUtH-0=OorjRaW;99bWhI+wj~R-Auf zodjdqFVYC|i3lYov>M+q%dhH74)mela=1)|-;6$mb!QPI^l`LG$o$dPVI_^WZ7YDY z_s3H#zJd)aQJUBC1~z(>V6E7)6gvnsivK!`LxCyBf)DmHQz851aaR$0C($)Q;^@Zm zh%Vk@k25KjM8K;O^(}*GC%3>!_`X3^)z$bs`s7y;hy}yL=J5l6W4O}o>0fu`z1V5W zx5B@=Bb8XgyJi2q`h7|2fimhu2KT&flF%Wh*=|MaWVwczvcs zkQgs1HnIbdHi=Tx*^w>{L+4hqZd!j*p}gTf%iznkN9& zrV5w&V;mzY<2d4S;EsR$J9`e3Z$Dx9hc781eP-=W62Ja1G8Ag5TnG(>aNbem{p96H zBS$qOQ^G-8LC0eM0hX!z(>_50cZ9Xb)bP7ewpC6ds1Sz6lDc0wl9R?G_ohRog9dr8u@8*VH869Q&ei zz&I4HDzJjqeJX^gd=_U@)~0(>Kz|KR%Fr&=!L+K7i*QxqtIoabMYLx(SxZ_{#7E>ZR0V-gZ!aT?e7xKwgyaj zd!I_W6rzHqmwD7P=rku)KepQjy5w3(N)Rxj#i-K=)?2v2rV}HNNNAE67ok+jPKL4D zFyug8Y1J3_+1{--@Ihx%H^F5%9&VwajtG~!I=y2ktWnx&ONkyRo0U8f`W>zsuZ*vA zms~#WvOT~lRQ7Sbm5y&(HhtE8CQ_sB{rsr#aA~1leL{P-#-pkB15Le5>E8l)lfV4t zx-ck1rbar8Q)D4x^#=W^AN%tj=a9QZIWlLvl;6_v6uIxlH}hqZ3cM>48WoLLOD0V? zUz+8+YRU?|GPgi8?{%ZyBt79nQM^N+${mx&t&|h5uH>u0BwWh;&ijMlJp-0!ekD$P z(y;Xx+>AOUqpWZ2Azn^j(uqa4n=cwTq?~$Rrm6eLAHTM_jYED8>z3h_=c3q}b~O^q zCH_<_w!psLQJ=G2xm{G)Gh!h=$`wn6%A6D`G~FSux_Xnd{X0Aw&48@cIe(vs`@33M zdn;j&drHW8Va6^LUfl-Q@KmwT&~IIq>IP@pT6hyz>-u4LZPr+Llu6R0_oS$JH+}0_ z+p%EF&NM_i0 zPr3?SQ)BXCg4@^7P)FLYev~_Lng3{~Q2$ul{oBDjt(}L1BK`&bx5PY+QqyE=Xo8~g z5&me`dH7|f9bE~IQHt3N<-{~u_@Cv3$v;qJv%ekPH1L>I|B7Crr`2SWkY|GT2F2Dx z&~WAJri(@bUXWZR#Hmpp$=cDvzY&DByE+#aY_{;~{Kq?y%8uL3$TOjm6;>1zb&v?G z2FyGaVz5~!^qmPHjw$+`-}E4}sskO}se;KHLDiz}`9`!(h`Vi6`@`Jr%8(PR3ka=<*l-z%_>wI!) 
zXOw*EhbQe}d+WSp(;OW=hpKczi)fwJ)ozkSI8(OB zM<-iZxy8=U@_&N1W+5F?z5*tZk~QP8b&}RCH{Iy04z22sbsrGU`~;0{TTQKc-`L@V z{QL@{st*v3ZnR~F{gAo{^=E zj*Udgm?W<&c@@`Xnk^JJ6%Hwxeg$`8z}d1=970rzfV8LZ0BBcQ%P3b6lL{G02X2sJ zVPIbr!JDv`kH{-CAQ@8qkTjWcg-agUGDB>Tf(oopiM{uqZ=rFW+vp`J-jVP0A4&%nW zRXAv&*L4Y9z#|kx(jRVqmI;y)0zMxdqY{|zIFy7VNxXeL!3#8>DRMv>H3avnw}H&U@+tSV5yX`!V)Gy#;hSPw5B7s7j0UC;CtakRpwNlx#dELxlYF&v$(Ve$BzDsPKP(jZ7YfAW^GaD({~!2>F;OA1oK$=ycK- zx&MBN%%2I-T-me?C-}?%j=cjmMJDMFatvTP|NU4URjycCk@jo3fBzErN{JGpfptW> z^kx74^WbCu_l5p%hW_sz`e(uX-zxk6aUWXmj3O2;;>vf$2Dm~fSt%vSDslaQ{}1ac Bg^B c)$, for cutoff $c$. We assume $c = 0$ without loss of generality. \n", - "\t \n", - "The following figure depicts a causal diagram representing the assumed causal relationships between these variables. Two key features of this diagram are one, that $X$ blocks the impact of $U$ on $Z$: in other words, $X$ satisfies the back-door criterion for learning causal effects of $Z$ on $Y$. And two, $X$ and $U$ are not descendants of $Z$.\n", - "\n", - "![RDD_DAG](RDD_DAG.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Using this causal diagram, we may express $Y$ as some function of its graph parents, the random variables $(X,Z,U)$: $$Y = F(X,Z,U).$$ In principle, we may obtain draws of $Y$ by first drawing $(X,Z,U)$ according to their joint distribution and then applying the function $F$. Similarly, we may relate this formulation to the potential outcomes framework straightforwardly:\n", - "\\begin{equation}\n", - "\\begin{split}\n", - "Y^1 &= F(X,1,U),\\\\\n", - "Y^0 &= F(X,0,U).\n", - "\\end{split}\n", - "\\end{equation}\n", - "Here, draws of $(Y^1, Y^0)$ may be obtained (in principle) by drawing $(X,Z,U)$ from their joint distribution and using only the $(X,U)$ elements as arguments in the above two equations, \"discarding\" the drawn value of $Z$. Note that this construction implies the _consistency_ condition: $Y = Y^1 Z + Y^0 ( 1 - Z)$. 
Likewise, this construction implies the _no interference_ condition because each $Y_i$ is considered to be produced with arguments ($X_i, Z_i, U_i)$ and not those from other units $j$; in particular, in constructing $Y_i$, $F$ does not take $Z_j$ for $j \\neq i$ as an argument." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, we define the following conditional expectations\n", - "\\begin{equation}\n", - "\\begin{split}\n", - "\\mu_1(x) &= E[ F(x, 1, U) \\mid X = x] ,\\\\\n", - "\\mu_0(x) &= E[ F(x, 0, U) \\mid X = x],\n", - "\\end{split}\n", - "\\end{equation}\n", - "with which we can define the treatment effect function\n", - "$$\\tau(x) = \\mu_1(x) - \\mu_0(x).$$\n", - "Because $X$ satisfies the back-door criterion, $\\mu_1$ and $\\mu_0$ are estimable from the data, meaning that \n", - "\\begin{equation}\n", - "\\begin{split}\n", - "\\mu_1(x) &= E[ F(x, 1, U) \\mid X = x] = E[Y \\mid X=x, Z=1],\\\\\n", - "\\mu_0(x) &= E[ F(x, 0, U) \\mid X = x] = E[Y \\mid X=x, Z=0],\n", - "\\end{split}\n", - "\\end{equation}\t\n", - "the right-hand-sides of which can be estimated from sample data, which we supposed to be independent and identically distributed realizations of $(Y_i, X_i, Z_i)$ for $i = 1, \\dots, n$. However, because $Z = I(X >0)$ we can in fact only learn $\\mu_1(x)$ for $X > 0$ and $\\mu_0(x)$ for $X < 0$. In potential outcomes terminology, conditioning on $X$ satisfies ignorability,\n", - "$$(Y^1, Y^0) \\perp \\!\\!\\! \\perp Z \\mid X,$$\n", - "but not _strong ignorability_, because overlap is violated. Overlap would require that\n", - "$$0 < P(Z = 1 \\mid X=x) < 1 \\;\\;\\;\\; \\forall x,$$\n", - "which is clearly violated by the RDD assumption that $Z = I(X > 0)$. 
Consequently, the overall ATE, \n", - "$\\bar{\\tau} = E(\\tau(X)),$ is unidentified, and we must content ourselves with estimating $\\tau(0)$, the conditional average effect at the point $x = 0$, which we estimate as the difference between $\\mu_1(0) - \\mu_0(0)$. This is possible for continuous $X$ so long as one is willing to assume that $\\mu_1(x)$ and $\\mu_0(x)$ are both suitably smooth functions of $x$: any inferred discontinuity at $x = 0$ must therefore be attributable to treatment effect." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Conditional average treatment effects in RDD\n", - "\n", - "We are concerned with learning not only $\\tau(0)$, the \"RDD ATE\" (e.g. the CATE at $x = 0$), but also RDD CATEs, $\\tau(0, \\mathrm{w})$ for some covariate vector $\\mathrm{w}$. Incorporating additional covariates in the above framework turns out to be straightforward, simply by defining $W = \\varphi(U)$ to be an observable function of the (possibly unobservable) causal factors $U$. We may then define our potential outcome means as\n", - "\\begin{equation}\n", - "\\begin{split}\n", - "\\mu_1(x,\\mathrm{w}) &= E[ F(x, 1, U) \\mid X = x, W = \\mathrm{w}] = E[Y \\mid X=x, W=\\mathrm{w}, Z=1],\\\\\n", - "\\mu_0(x,\\mathrm{w}) &= E[ F(x, 0, U) \\mid X = x, W = \\mathrm{w}] = E[Y \\mid X=x, W =\\mathrm{w}, Z=0],\n", - "\\end{split}\n", - "\\end{equation}\n", - "and our treatment effect function as\n", - "\\begin{equation}\n", - "\\tau(x,\\mathrm{w}) = \\mu_1(x,\\mathrm{w}) - \\mu_0(x,\\mathrm{w})\n", - "\\end{equation}\n", - "We consider our data to be independent and identically distributed realizations $(Y_i, X_i, Z_i, W_i)$ for $i = 1, \\dots, n$. Furthermore, we must assume that $\\mu_1(x,\\mathrm{w})$ and $\\mu_0(x,\\mathrm{w})$ are suitably smooth functions of $x$, {\\em for every} $\\mathrm{w}$; in other words, for each value of $\\mathrm{w}$ the usual continuity-based identification assumptions must hold. 
\n", - "\n", - "With this framework and notation established, CATE estimation in RDDs boils down to estimation of condition expectation functions $E[Y \\mid X=x, W=\\mathrm{w}, Z=z]$, for which we turn to BART models." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## The BARDDT Model\n", - "\n", - "We propose a BART model where the trees are allowed to split on $(x,\\mathrm{w})$ but where each leaf node parameter is a vector of regression coefficients tailored to the RDD context (rather than a scalar constant as in default BART). In one sense, such a model can be seen as implying distinct RDD ATE regressions for each subgroup determined by a given tree; however, this intuition is only heuristic, as the entire model is fit jointly as an ensemble of such trees. Instead, we motivate this model as a way to estimate the necessary conditional expectations via a parametrization where the conditional treatment effect function can be explicitly regularized, as follows.\n", - "\n", - "Let $\\psi$ denote the following basis vector:\n", - "\\begin{equation}\n", - "\\psi(x,z) = \\begin{bmatrix}\n", - "1 & z x & (1-z) x & z\n", - "\\end{bmatrix}.\n", - "\\end{equation}\n", - "To generalize the original BART model, we define $g_j(x, \\mathrm{w}, z)$ as a piecewise linear function as follows. Let $b_j(x, \\mathrm{w})$ denote the node in the $j$th tree which contains the point $(x, \\mathrm{w})$; then the prediction function for tree $j$ is defined to be:\n", - "\\begin{equation}\n", - "g_j(x, \\mathrm{w}, z) = \\psi(x, z) \\Gamma_{b_j(x, \\mathrm{w})}\n", - "\\end{equation}\t\n", - "for a leaf-specific regression vector $\\Gamma_{b_j} = (\\eta_{b_j}, \\lambda_{b_j}, \\theta_{b_j}, \\Delta_{b_j})^t$. 
Therefore, letting $n_{b_j}$ denote the number of data points allocated to node $b$ in the $j$th tree and $\\Psi_{b_j}$ denote the $n_{b_j} \\times 4$ matrix, with rows equal to $\\psi(x,z)$ for all $(x_i,z_i) \\in b_j$, the model for observations assigned to leaf $b_j$, can be expressed in matrix notation as:\n", - "\\begin{equation}\n", - "\\begin{split}\n", - "\\mathbf{Y}_{b_j} \\mid \\Gamma_{b_j}, \\sigma^2 &\\sim \\mathrm{N}(\\Psi_{b_j} \\Gamma_{b_j},\\sigma^2)\\\\\n", - "\\Gamma_{b_j} &\\sim \\mathrm{N} (0, \\Sigma_0),\n", - "\\end{split}\n", - "\\end{equation}\n", - "where we set $\\Sigma_0 = \\frac{0.033}{J} \\mathbf{I}$ as a default (for $x$ vectors standardized to have unit variance in-sample). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This choice of basis entails that the RDD CATE at $\\mathrm{w}$, $\\tau(0, \\mathrm{w})$, is a sum of the $\\Delta_{b_j(0, \\mathrm{w})}$ elements across all trees $j = 1, \\dots, J$:\n", - "\\begin{equation}\n", - "\\begin{split}\n", - "\\tau(0, \\mathrm{w}) &= E[Y^1 \\mid X=0, W = \\mathrm{w}] - E[Y^0 \\mid X = 0, W = \\mathrm{w}]\\\\\n", - "& = E[Y \\mid X=0, W = \\mathrm{w}, Z = 1] - E[Y \\mid X = 0, W = \\mathrm{w}, Z = 0]\\\\\n", - "&= \\sum_{j = 1}^J g_j(0, \\mathrm{w}, 1) - \\sum_{j = 1}^J g_j(0, \\mathrm{w}, 0)\\\\\n", - "&= \\sum_{j = 1}^J \\psi(0, 1) \\Gamma_{b_j(0, \\mathrm{w})} - \\sum_{j = 1}^J \\psi(0, 0) \\Gamma_{b_j(0, \\mathrm{w})} \\\\\n", - "& = \\sum_{j = 1}^J \\Bigl( \\psi(0, 1) - \\psi(0, 0) \\Bigr) \\Gamma_{b_j(0, \\mathrm{w})} \\\\\n", - "& = \\sum_{j = 1}^J \\Bigl( (1,0,0,1) - (1,0,0,0) \\Bigr) \\Gamma_{b_j(0, \\mathrm{w})} \\\\\n", - "&= \\sum_{j=1}^J \\Delta_{b_j(0, \\mathrm{w})}.\n", - "\\end{split}\n", - "\\end{equation}\n", - "As a result, the priors on the $\\Delta$ coefficients directly regularize the treatment effect. We set the tree and error variance priors as in the original BART model. 
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following figures provide a graphical depiction of how the BARDDT model fits a response surface and thereby estimates CATEs for distinct values of $\\mathrm{w}$. For simplicity only two trees are used in the illustration, while in practice dozens or hundreds of trees may be used (in our simulations and empirical example, we use 150 trees)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
\n", - " \n", - "
Two regression trees with splits in x and a single scalar w. Node images depict the g(x,w,z) function (in x) defined by that node's coefficients. The vertical gap between the two line segments in a node that contain x=0 is that node's contribution to the CATE at X = 0. Note that only such nodes contribute for CATE prediction at x=0
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
\n", - " \n", - "
The two top figures show the same two regression trees as in the preceding figure, now represented as a partition of the x-w plane. Labels in each partition correspond to the leaf nodes depicted in the previous picture. The bottom figure shows the partition of the x-w plane implied by the sum of the two trees; the red dashed line marks point W=w* and the combination of nodes that include this point
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "
\n", - " \"trees3\"/\n", - "
Left: The function fit at W = w* for the two trees shown in the previous two figures, shown superimposed. Right: The aggregated fit achieved by summing the contributes of two regression tree fits shown at left. The magnitude of the discontinuity at x = 0 (located at the dashed gray vertical line) represents the treatment effect at that point. Different values of w will produce distinct fits; for the two trees shown, there can be three distinct fits based on the value of w.
\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "vscode": { - "languageId": "plaintext" - } - }, - "source": [ - "An interesting property of BARDDT can be seen in this small illustration --- by letting the regression trees split on the running variable, there is no need to separately define a 'bandwidth' as is used in the polynomial approach to RDD. Instead, the regression trees automatically determine (in the course of posterior sampling) when to 'prune' away regions away from the cutoff value. There are two notable features of this approach. One, different trees in the ensemble are effectively using different local bandwidths and these fits are then blended together. For example, in the bottom panel of the second figure, we obtain one bandwidth for the region $d+i$, and a different one for regions $a+g$ and $d+g$. Two, for cells in the tree partition that do not span the cutoff, the regression within that partition contains no causal contrasts --- all observations either have $Z = 1$ or $Z = 0$. For those cells, the treatment effect coefficient is ill-posed and in those cases the posterior sampling is effectively a draw from the prior; however, such draws correspond to points where the treatment effect is unidentified and none of these draws contribute to the estimation of $\\tau(0, \\mathrm{w})$ --- for example, only nodes $a+g$, $d+g$, and $d+i$ provide any contribution. This implies that draws of $\\Delta$ corresponding to nodes not predicting at $X=0$ will always be draws from the prior, which has some intuitive appeal." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Demo\n", - "\n", - "In this section, we provide code for implementing our model in `stochtree` on a popular RDD dataset.\n", - "First, let us load `stochtree` and all the necessary libraries for our posterior analysis." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "import numpy as np\n", - "import pandas as pd\n", - "from sklearn.tree import DecisionTreeRegressor, plot_tree\n", - "from stochtree import BARTModel" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Dataset\n", - "\n", - "The data comes from Lindo et al (2010), who analyze data on college students enrolled in a large Canadian university in order to evaluate the effectiveness of an academic probation policy. Students who present a grade point average (GPA) lower than a certain threshold at the end of each term are placed on academic probation and must improve their GPA in the subsequent term or else face suspension. We are interested in how being put on probation or not, $Z$, affects students' GPA, $Y$, at the end of the current term. The running variable, $X$, is the negative distance between a student's previous-term GPA and the probation threshold, so that students placed on probation ($Z = 1$) have a positive score and the cutoff is 0. 
Potential moderators, $W$, are:\n", - "\n", - "* gender (`male`), \n", - "* age upon entering university (`age_at_entry`)\n", - "* a dummy for being born in North America (`bpl_north_america`), \n", - "* the number of credits taken in the first year (`totcredits_year1`)\n", - "* an indicator designating each of three campuses (`loc_campus` 1, 2 and 3), and\n", - "* high school GPA as a quantile w.r.t the university's incoming class (`hsgrade_pct`).\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Load and organize data\n", - "data = pd.read_csv(\"https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "y = data.loc[:,\"nextGPA\"].to_numpy()\n", - "x = data.loc[:,\"X\"].to_numpy()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "x = x/np.std(x)\n", - "w = data.iloc[:,3:11]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ordered_cat = pd.api.types.CategoricalDtype(ordered=True)\n", - "unordered_cat = pd.api.types.CategoricalDtype(ordered=False)\n", - "w.loc[:,\"totcredits_year1\"] = w.loc[:,\"totcredits_year1\"].astype(ordered_cat)\n", - "w.loc[:,\"male\"] = w.loc[:,\"male\"].astype(unordered_cat)\n", - "w.loc[:,\"bpl_north_america\"] = w.loc[:,\"bpl_north_america\"].astype(unordered_cat)\n", - "w.loc[:,\"loc_campus1\"] = w.loc[:,\"loc_campus1\"].astype(unordered_cat)\n", - "w.loc[:,\"loc_campus2\"] = w.loc[:,\"loc_campus2\"].astype(unordered_cat)\n", - "w.loc[:,\"loc_campus3\"] = w.loc[:,\"loc_campus3\"].astype(unordered_cat)\n", - "c = 0\n", - "n = data.shape[0]\n", - "z = np.where(x > c, 1.0, 0.0)\n", - "# Window for prediction sample\n", - "h = 0.1\n", - "test = (x > -h) & (x < h)\n", - 
"ntest = np.sum(np.where(test, 1, 0))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Target estimand\n", - "\n", - "Generically, our estimand is the CATE function at $x = 0$; i.e. $\\tau(0, \\mathrm{w})$. The key practical question is which values of $\\mathrm{w}$ to consider. Some values of $\\mathrm{w}$ will not be well-represented near $x=0$ and so no estimation technique will be able to estimate those points effectively. As such, to focus on feasible points --- which will lead to interesting comparisons between methods --- we recommend restricting the evaluation points to the observed $\\mathrm{w}_i$ such that $|x_i| \\leq \\delta$, for some $\\delta > 0$. In our example, we use $\\delta = 0.1$ for a standardized $x$ variable. Therefore, our estimand of interest is a vector of treatment effects:\n", - "$$\\tau(0, \\mathrm{w}_i) \\;\\;\\; \\forall i \\;\\text{ such that }\\; |x_i| \\leq \\delta$$" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Implementing BARDDT\n", - "\n", - "In order to implement our model, we write the Psi vector, as defined before: `Psi = np.column_stack([np.ones(n), z * x, (1 - z) * x, z])`. The training matrix for the model is `np.column_stack([x, w])`, which we feed into the `BARTModel` sampler via the `X_train` parameter. The basis vector `Psi` is fed into the function via the `leaf_basis_train` parameter. The parameter list `barddt_mean_params` defines options for the mean forest (a different list can be defined for a variance forest in the case of heteroscedastic BART, which we do not consider here). Importantly, in this list we define parameter `sigma2_leaf_init = np.diag(np.repeat(0.1/150, 4))`, which sets $\\Sigma_0$ as described above. 
Now, we can fit the model, which is saved in object `barddt_model`.\n", - "\n", - "Once the model is fit, we need 3 elements to obtain the CATE predictions: the basis vectors at the cutoff for $z=1$ and $z=0$, the test matrix $[X \\quad W]$ at the cutoff, and the testing sample. We define the prediction basis vectors $\\psi_1 = [1 \\quad 0 \\quad 0 \\quad 1]$ and $\\psi_0 = [1 \\quad 0 \\quad 0 \\quad 0]$, which correspond to $\\psi$ at $(x=0,z=1)$, and $(x=0,z=0)$, respectively. These vectors are written into Python as `Psi1 = np.column_stack([np.ones(n), np.repeat(c, n), np.zeros(n), np.ones(n)])` and `Psi0 = np.column_stack([np.ones(n), np.zeros(n), np.repeat(c, n), np.zeros(n)])`. Then, we write the test matrix at $(x=0,\\mathrm{w})$ as `xmat_test = np.column_stack([np.zeros(n), w])[test,:]`. Finally, we must define the testing window. As discussed previously, our window is set such that $|x| \\leq 0.1$, which can be set in Python as `test = (x > -h) & (x < h)`.\n", - "\n", - "Once all of these elements are set, we can obtain the outcome predictions at the cutoff by running `barddt_model.predict(xmat_test, Psi1)` (resp. `barddt_model.predict(xmat_test, Psi0)`). Each of these calls returns a list, from which we can extract element `y_hat` to obtain the posterior distribution for the outcome. In the code below, the treated and control outcome predictions are saved in the matrix objects `pred1` and `pred0`, respectively. Now, we can obtain draws from the CATE posterior by simply subtracting these matrices. The function below outlines how to perform each of these steps in Python." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def estimate_barddt(y,x,w,z,test,c,num_gfr=10,num_mcmc=100,seed=None):\n", - " ## Lists of parameters for the Stochtree BART function\n", - " barddt_global_params = {\n", - " \"standardize\": True,\n", - " \"sample_sigma_global\": True,\n", - " \"sigma2_global_init\": 0.1\n", - " }\n", - " if seed is not None:\n", - " barddt_global_params[\"random_seed\"] = seed\n", - " barddt_mean_params = {\n", - " \"num_trees\": 50,\n", - " \"min_samples_leaf\": 20,\n", - " \"alpha\": 0.95,\n", - " \"beta\": 2,\n", - " \"max_depth\": 20,\n", - " \"sample_sigma2_leaf\": False,\n", - " \"sigma2_leaf_init\": np.diag(np.repeat(0.1/150, 4))\n", - " }\n", - " ## Set basis vector for leaf regressions\n", - " n = y.shape[0]\n", - " Psi = np.column_stack([np.ones(n), z * x, (1 - z) * x, z])\n", - " covariates = np.column_stack([x, w])\n", - " ## Model fit\n", - " barddt_model = BARTModel()\n", - " barddt_model.sample(\n", - " X_train=covariates,\n", - " y_train=y,\n", - " leaf_basis_train=Psi,\n", - " num_gfr=num_gfr,\n", - " num_mcmc=num_mcmc,\n", - " general_params=barddt_global_params,\n", - " mean_forest_params=barddt_mean_params\n", - " )\n", - " ## Define basis vectors and test matrix for outcome predictions at X=c\n", - " Psi1 = np.column_stack([np.ones(n), np.repeat(c, n), np.zeros(n), np.ones(n)])\n", - " Psi0 = np.column_stack([np.ones(n), np.zeros(n), np.repeat(c, n), np.zeros(n)])\n", - " Psi1 = Psi1[test,:]\n", - " Psi0 = Psi0[test,:]\n", - " xmat_test = np.column_stack([np.zeros(n), w])[test,:]\n", - " ## Obtain outcome predictions\n", - " pred1 = barddt_model.predict(xmat_test, Psi1)\n", - " pred0 = barddt_model.predict(xmat_test, Psi0)\n", - " ## Obtain CATE posterior\n", - " return pred1 - pred0" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, we proceed to fit the BARDDT model." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "num_chains = 4\n", - "num_gfr = 2\n", - "num_mcmc = 100\n", - "cate_result = np.empty((ntest, num_chains*num_mcmc))\n", - "for i in range(num_chains):\n", - " cate_rdd = estimate_barddt(y,x,w,z,test,c,num_gfr=2,num_mcmc=100,seed=i)\n", - " cate_result[:,(i*num_mcmc):((i+1)*num_mcmc)] = cate_rdd" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We now proceed to analyze the CATE posterior. The figure produced below presents a summary of the CATE posterior produced by BARDDT for this application. This picture is produced fitting a regression tree, using $W$ as the predictors, to the individual posterior mean CATEs:\n", - "\\begin{equation}\n", - "\\bar{\\tau}_i = \\frac{1}{M} \\sum_{h = 1}^M \\tau^{(h)}(0, \\mathrm{w}_i),\n", - "\\end{equation}\n", - "where $h$ indexes each of $M$ total posterior samples. As in our simulation studies, we restrict our posterior analysis to use $\\mathrm{w}_i$ values of observations with $|x_i| \\leq \\delta = 0.1$ (after normalizing $X$ to have standard deviation 1 in-sample). For the Lindo et al (2010) data, this means that BARDDT was trained on $n = 40,582$ observations, of which 1,602 satisfy $x_i \\leq 0.1$, which were used to generate the effect moderation tree." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Fit regression tree\n", - "y_surrogate = np.mean(cate_rdd, axis=1)\n", - "X_surrogate = w.iloc[test,:]\n", - "cate_surrogate = DecisionTreeRegressor(min_impurity_decrease=0.0001)\n", - "cate_surrogate.fit(X=X_surrogate, y=y_surrogate)\n", - "plot_tree(cate_surrogate, impurity=False, filled=True, feature_names=w.columns, proportion=False, label='root', node_ids=True)\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The resulting effect moderation tree indicates that course load (credits attempted) in the academic term leading to their probation is a strong moderator. Contextually, this result is plausible, both because course load could relate to latent character attributes that influence a student's responsiveness to sanctions and also because it could predict course load in the current term, which would in turn have implications for the GPA (i.e. it is harder to get a high GPA while taking more credit hours). The tree also suggests that effects differ by age and gender of the student. These findings are all prima facie plausible as well.\n", - "\n", - "To gauge how strong these findings are statistically, we can zoom in on isolated subgroups and compare the posteriors of their subgroup average treatment effects. This approach is valid because in fitting the effect moderation tree to the posterior mean CATEs we in no way altered the posterior itself; the effect moderation tree is a posterior summary tool and not any additional inferential approach; the posterior is obtained once and can be explored freely using a variety of techniques without vitiating its statistical validity. 
Investigating the most extreme differences is a good place to start: consider the two groups of students at opposite ends of the treatment effect range discovered by the effect moderation tree:\t\n", - "\n", - "* **Group A** a male student that attempted more than 4.8 credits in their first year (rightmost leaf node, colored red, comprising 211 individuals)\n", - "* **Group B** a female student of any gender who entered college younger than 19 (leftmost leaf node, colored deep orange, comprising 369 individuals).\n", - "\n", - "Subgroup CATEs are obtained by aggregating CATEs across the observed $\\mathrm{w}_i$ values for individuals in each group; this can be done for individual posterior samples, yielding a posterior distribution over the subgroup CATE:\n", - "\\begin{equation}\n", - "\\bar{\\tau}_A^{(h)} = \\frac{1}{n_A} \\sum_{i : \\mathrm{w}_i} \\tau^{(h)}(0, \\mathrm{w}_i),\n", - "\\end{equation}\n", - "where $h$ indexes a posterior draw and $n_A$ denotes the number of individuals in the group A.\n", - "\n", - "The code below produces a contour plot for a bivariate kernel density estimate of the joint CATE posterior distribution for subgroups A and B. The contour lines are nearly all above the $45^{\\circ}$ line, indicating that the preponderance of posterior probability falls in the region where the treatment effect for Group A is greater than that of Group B, meaning that the difference in the subgroup treatment effects flagged by the effect moderation tree persist even after accounting for estimation uncertainty in the underlying CATE function." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "predicted_nodes = cate_surrogate.apply(X=X_surrogate)\n", - "posterior_group_a = np.mean(cate_result[predicted_nodes==2,:],axis=0)\n", - "posterior_group_b = np.mean(cate_result[predicted_nodes==6,:],axis=0)\n", - "posterior_df = pd.DataFrame({'group_a': posterior_group_a, 'group_b': posterior_group_b})\n", - "sns.kdeplot(data=posterior_df, x=\"group_b\", y=\"group_a\")\n", - "plt.axline((0, 0), slope=1, color=\"black\", linestyle=(0, (3, 3)))\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As always, CATEs that vary with observable factors do not necessarily represent a _causal_ moderating relationship. Here, if the treatment effect of academic probation is seen to vary with the number of credits, that does not imply that this association is causal: prescribing students to take a certain number of credits will not necessarily lead to a more effective probation policy, it may simply be that the type of student to naturally enroll for fewer credit hours is more likely to be responsive to academic probation. An entirely distinct set of causal assumptions are required to interpret the CATE variations themselves as causal. All the same, uncovering these patterns of treatment effect variability are crucial to suggesting causal mechanism to be investigated in future studies." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# References\n", - "\n", - "Lindo, Jason M., Nicholas J. Sanders, and Philip Oreopoulos. \"Ability, gender, and performance standards: Evidence from academic probation.\" American economic journal: Applied economics 2, no. 2 (2010): 95-117." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.17" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/vignettes/Python/RDD/trees1.png b/vignettes/Python/RDD/trees1.png deleted file mode 100644 index 0a5bc3acf3e9f2f82ab9fa76cc11949c673195d0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40369 zcmZsD1z1#F*ETWW(2XDhLw5=i(w%~Uba#W&(v5VNNQ=@bAxaCZIxghH{qYVlY*)eGp`iD|!p`s+~aJm%|}u{z}t zo|ZzqC%AVn5rW3rN5wBubs*%fMB;B?*yQD<@zRR#8yVXP+tvCPT$&8~VyzK#}J3`-i0|*#0%=$Aywx!$;GP3+`xUtLz!t zarkd|o93#Py_pbwLz|*=AJ+AJ&EUlo5g4{!9pMi&tMb9yS%(fS_Jd`s++8PKk|h;{ z5!+p(K4}$hPiCecwYo2U36EUg;y&goc`{&%t)qa)P1|QfTP4SLzK~??#XXJMNx^C_ z9*q5Bj!BIKlkj<(v4k|{BU7{|CA6)_dt~lBSHyDjBU%Zunu4fzZYbskjXL&_hJQ$2UVs!kzffvS!#zQH*_w~p7b%Bu+DZ>+j#T<*#j>u_OFyxvR z+VYml$_UKhGZX| zl$Qs8tD8NwuyAy-c5=OY$h!q@HD#lr?W(P;BxvU3z;0shWNN|g>EH~%1VPAC5PWp7 za5aH>I@mk92zm-r|8<5S_zXYHK@I!s6jwW8YHejzn55HF3)p>jPIgXe5p);~CiK+Y zQcz7w=AX;Kzl5o+U0t08IXFB#JlH+%u{%Ar;@}bx5a8hC=HTXL181uZgLXo2xK2HT*&U{rS6{7M?c0p5*BAPq#n^IpE)L zaItf8{C96~sSy0ApsJ0hg}siHjRP1S@C*?iE>}b zoBzG?v5Uo1Nhb&JNLP_xGxN{Izkm79g+d(gp8qxye-HCtN5MFYpbK&QH)kT~vy0SV z-H0K`ONncEBJR9In^T@V@5#ZWI`l-NOUMYpeDz7S(78#UF5$8{>;((arly8kM*oC# z-(dWku}-(06QKiu$rX_DjebKVN4_4w@lo`4mOUFETXcjbrd zW6bRm#bWt%xd6x?KW%vsZvHxbR${bxgg<^5kqC7?Y5wOReyO?zi@=+`Aj3-aXR|XAc-dMU{WP=x z81OmqfbF`*f|uF~f81oB4ly(QtY${X=8wb9IN)&8>vCfT@&`eloiZ>yHO|!pCWtmx)E{k8zrZkCWbR@9ZCYAz%YOPWnmzduwteef_Zq4!xOw zp3Q*f{6t>mJ`0QV|K9tM)9EdPo#Y>fvuVI1U6lLKfAttF+kgi6BQ?Ei?*G{vCM4Ic z6AhOd?mrI;BRO{suYRztmZ4q5LVDo76OTo9|C8Hi0|s8(iG7(@+8Dp)EFu9cRxYHK zC0gU#f&P~(l-y?Bn8oU^V|TtZsATdvvG>GLHf$ff$Nn{7{9y<@Y+A1r*wgN+Qxk_k 
zlxPFU=3*L7T5qnLG6da9243)Sh^rXW9*kP!d+{?u>kgk|7LUU$&mdO}8DHw=`0IY~J8r*$^Q-0i zf{023+|SZK>t!5A$t{lLN#dQDZ7g+$7f=v<4{+HS>J337SRHz>m;Ca9N1rUEFYPPE zG;EJXZbZFW%b3;S9MM6pCYKH3)wkxoi9BDwWv#kcHC!@<{-CNZ4elmf~}Kx`}$-x33f){DjupxioQ>yP(<-L1 zr#OCnCqGrDgZw$-NczDStKn?nK@(l;(Tnk&)!zq>6EQuUYqdI@Ed4QtO0HNS5>or( zxQb}XgYS(r&ai<>T+`F@?MkgLxuiBTHLsG{^zJ(=z2S)1=QaO?JJa~2;Am$?9Tk%V z=GkVdEa)s2hH0}w;CFN7kwWBS_FI~1Q-N@b zH1b&GXl1%@70~XS%zHa68bjHRwsR91T# za*}UR`lx0E@6LaC(Di7PPrM(Qt$(yYcKj}8AQbH)<5zNQbApV`=AM@SEUT(bJl#>L4Ho_FLAM}`b*H%rIv+ANfs`>uJ@kTa4t2kCi-TS(~F=t7~(FK8VmI&RVn^&zHvNeZl9n zZ|TSIE72O^HZr>zyZ7GawV?BrML?1({6e;!eR($QpBjcm#+*Y`R**=iNUT$B8Y*<~ ziAKSAbFRsyjY-rnAVTiG=f3ewtyP@Rk1m3-H;)P;bn*uFn~K$5MP4r4T;xby&e(=-2)}=7AiMNYK*DGOtUxm=U}A@yT08_8@_!t@rj1Wbl*l3Wzb|a} zTXX{#e22wTNur{&eYa*#P5hr01+1G~J9tlJ{c%`dJU}9fCG&YG?XQ^xBYT6NJF>h` zZ#DQQR3QRur!hZ6TkhB311Po6fXL3iIqYs#{^!}ekjBPByUa)wy8pTNr`3g$+I2q| zLT}dd-&)i574mI-`TI|^O&Z*QtAf#1A{I@#wZ4=!uph&QxiUS3dh+guV%#Q9W=TDy zSzKiQ)e2{}z*4naQfc~n&xo@iO0fNYj#kFNwJ%PsDM2V0nehbzI=T8FBPB~m5et;) zuTR7_{YseuTu8WDZ)~`_BW1mk+d$AyX{#}AN9sXC#m()GCN1bYx!7y(TsBr7ZqO%#TY}Kh4g&e{tY;{ibR&R`^`i`H49}wd#K>ikKjv zD0JO~Skm(65Zy?vl}c}pXe&IX4zaD?DEwTG;wW))WGr1VsR{V+nhJP+c2n*~dLfP= z(RQ%bZ?V)7YH4oeMKMw%_GRq7m&@j8LHOF|6_nq0ITIw;#^jUP>pIka6S6>#)=M?Z z!A!dCsq)(0%;$!T>P1T2PV+*=n&p`q<+_iJ?|1(OL%_u35Zmfrj*OL=1+--gdl$Oz z&Mw!XmR-vWpJX%$W>fv5HU}wsz6-qdy>!*eIO;pB_-II z$sOVI=iS@ld;F{|rXESpKU)mEWxhU;p_VU&rbQrLcuz`5a`m1Jp^KY(&HjH+K!tLy zRwK#@b&){?UBAKHwx^DxIpjb%cQMK^u~}JQjYjPb!N2!KK=XW#FxdHi zBC{+CrZx@WiN)%tgqBx`4vI517NK|_eY`QMmj7w#A`=qSOG!EEb>_rEO2z|^ib%ry zVDTCSixu)nDVa%e87-ie{o7>EFa+YX!lIA&1~>O2jRWT2x!KKBn}wSXXMbJbRI{r@ z#$(m45`%98ZwkmSaBK*NsUIAA5^v#+AiK6op<|Rh>#AJE$HVb}(}93WsBuwd?=vJ4 z{d+o+uq56YZY}((a0F_L#})cBT3{(9O0V)r5lYg&1#>T7q~tRfQ?v5dP%la&-Bs25 zQbLGXzNVN%Yc;}Dr0XDSdliwh zLd-ina_stzFtEUq;Mljyq;vK3$I8LH_nth+zUF)LAGrOmeaTXV!rp&dyGQ^p1W>UtO3gNckdWlIb{yyJ14)6Raj#YB3~GWrq_g%p6QOMEL^ zuis%(is;vv_fkto5+;b=oDJJPxcNnW0=_~>ga;g#-x~(BMc~m)1ZiPg-PE{Hi 
zq0xEFJqg$M`056b4NVHeRC~M8=L&;YfOn`BDUlXGE=?W#r0>1^mdhxO;qOVOjSDcM z(c7J_mb`C2ojdOYC{#o-qA_s{9r6Cq``!94v`+Ky@rF-WRw7iDX60pllEk=QoI=G& z#2ErIdJ@`mKF?bkVeiD^cDIvKIzje%&S|ArR+j5g8hA2=CYW?>V> z>axlmyiW7_8A4=Chg*}SYb_VwlY(#I^c{aTs$|6nIm2RAgTf3EQE*ugAw{QK2Il>9 zhf0OtZEJ$akJam7HQ}DsP>4`ASquTAc2(|yiOuJVmpt~= zQ~BX%rv2AAgTZ40cz^tp zm2M`s+XKCh?>6a~G|RLXh5V@gPB4Mm(98W!oC7e}{ebU~_uFuhN1bSb9FJ@|glzoTl|my9xI9hhdnc`Sh1TacdqIP_c0U_AWdR6~rM^<$ABF zwaG<%w57}X(l~N5u>_tSy*bBfjRae>SO+Ve)nTEaYNh`{5=ZQ=i|gjMMHPavBF=e7QxFs5cxcP#g75*zY6nl6+zlC(*Nal5|kIb`Dx{$2(h3=>HC%dw2n8`v3z?ijm(ri6wtfqOkk{ zal`HOY1<89;8WZ#6sn`VP`*rDTgp+E1lb%B-@>PBeKIOB+x*G5B}jrUAPh?Hsl;Ju zX^VB0<1Cf$sX5I=ofiYzM8m|bc}OorxlnohdS;A73=<%Phy$sr#`2ezVpiTaS0|;n zAHKNnfIl2B@b@P53~B?BfrAsTn|9JF$CvmG!PM%z^Q}^3M3<_f+dMyTq0zLb^7n1V zq~`rD9M&iWEcyuhQHYZ|+VlLc1;BMs{km^`Vz~4hodnH-Ko`HqTcMV_XlUu%t1& zG(BAl;!^|^97QNSQ4-+CIl zH(U-)mn`H?H_{k@11aCXHe)Cyu3#-AVFYTIL#kA{>sX=R39^i5!$Pi>)BsSLtT*$lP9ue z&8Qn!&EMZCmV4w6)ysXGTdaF`O9`f; zB{iz&KX`8|Un(IXR*WY?zWKGR0@R;R9p2_n%u;*rzXYePf^ctgR$O>u5|_Ra=w7zQ z6)1#d&^VOPY9+)=1Kb1c@OGRO**XVYecWP9-P`=9W`Xk7(q(dQ>|Q^7S*Vc0%*=hq z$KhW>euvgqIm9E3>8^yxHiQ@|7a&a!L?`yjmh+NGik}neDw~oLelliv4`>~ElmOK< z(GcQ%gcLCw80$2U&Xvl>m|{)p<>!iIKBVs1`oQtmDvAJ7 zX@n4m1-V+1RcpYJJOa`LlD0o(eD&LCCV=fee{P~t(*~5n0_-uwa1kTqAJ%K6ctZtP z{kC8!ct#@Jnwa8aAl2(3$t2DU*XjBES)lVr3y77>YU#0J%XTn|3ZYpKkbD)yq?>=O zp7?+a2n<)vnk$L%Pz~Y=V7Aa4pu2_3$Vk>!U3Ib{{OIf=*3Oy<|DkR(h>q5fIZh!7_m*)^Q+1)Ae62FyM?Ft=wjLuZ>hI9C214O@FrbYJ60*x$#|4F{{lXk``pL(()lL%N(gsF&~r)JteW5Ws;cQA6#nG|KB z4%F>>w6tWBqTVDn@q8LGf^X@Re*Fh0MbxuD@v=JwaQ*e~b#m^C{&r%SX4L2`e&@bDV~+3742QMq zR*NeaX`l{_PZdqN0=ZYM+1=sn=NG3Lmn0I9#^aVcZf^uvX(x8Zt0x@4iid#QgE-S^^c zz2aUqR>KwaI(iL`Y$TnWO=*AypVYtHrIURf@_2|#?-7s=zJSE?E(gJK0oMi_Z^K!X z>+jw)=7@!3lNSP3HCAV%E~|-}WJ%I&JQlr2A$$MPghcz@cnL)fB_T0_k(c)=JBE%QyiZ~`T(kdnNfu#9j?Zy>IWMu_*(@`nBgv1vro(pj6GC}82E+w(KnDHF zM7He_17%|FFfFZ}ZR9k-JS_j#>x_93AzGUYt*z+wLAfkSw8lh;*1DeII?|sd&+^qJ ztXI4a*S+3omMPSrE&7!JEe0f!fT4t5M@+*U_#!c-4agPRClFVqKxgIZSXtsC$StC? 
zG{1ge0>p>0#wTVn=!Zt*U)0RVA7KWPe`Bz~cxo8(&+5{bMS$<@riz&zmMRL66svs) zt~}|&Mt+)=)(-tc{&C3wIPdB1=aZ#*he~WNQHLe<)tDNw6e`vkbb*<$NizA7xo_a# z1v{q*5HrCS?0R>lCA39Lvjcj>`HO@vDJOm)Br3vL=0jR%fHQ&Yyh0?6(Qp-s+|;gt z;{MR1dzLVLA{)-{Z#_ymqOupfxxs6TP{t zHI@$t4DA!#P&phH)xw}kW6^pDoCcb!^W!9Li@rC$07N7%_+2Q&WpMxN!<@-Xv)+>M zg^H*XrcmP6w}RV$wdwD!a^VM@Rou=GIR@kv_wp4`>>|a0jP}ZQvINNlRpdS33&|iE zDAwE06h1rN8vEew0pDZMWPA@_rU|%MHm#-Vtpaw*g}=J=92ZOj-ZP?}?tAlWZ|y!453EWeCvg~w0D2{pYbzUj_t6EjW*OthsM~2k$TR?;nGUG^7))J^=qH)N%sq4R}h# z7<-neKUm<$nqC0Wri=vPi(Va#D;SgVJLleG#@kGK9^x(2Dp#J zq+X<-u>G8XVRgFN3~o!ka@(AJTPpN(+Dz+i57rc(@e$|tG`H2ip%B>XJc!mxi=~0$ zLTZ6XD3DpALeN8W9<2S%5i|^>I1qGhTz5AJRD`GiN~tMnc5ih0QeS>x7Pt`kFvsWM zdDa7uhrqs}{cPaz@^aV7U;Pegl}Sw+u$*_+2220m-*6lrPyhw$ld!p$F?;HK9GyzG z(9@qgI`Z=J37DjF4^FPqS}&yqmL*k3(ddlM4vfr#@ zEG`UlZTDaRYQQO}9GhM};pG+ZvJ|!pJ$$Z?KX0)>1f;OdYH|X+rOhvNenkGW_dwEK zBn7T1C6{SPo6$;n!+c_n-?_YR2Q4POG08}0xY@Unyo2=@PH-(Hd2!C5K?Z-;b zW0J+v%9dFJ6cw#m9MMZv)wCLm{*pH)o9>6NCDFvlRBPq!@T zCD?b=E>Zk5vk{>{STxQ1X!Z*`|5eLiPluq45lXC=aA^9gdw&Ul)wls`NTI3P5nkW4 zrI6u!%IES&&(ouCk0g07wn{5CAb66u4q^EFjDZXOHwGtu0Lv6KJmvsQ2;V!*w+ zPJ@ikF?oIPh2s2suUBfX6m03VrA8xCVT*G_`kM*&5sEmm8ut2|&K)-#JwcAS8EZ3C}M#U<$`&_~8wxuCCBSuhb(NCPgX%YnBy(9=< zZ|GyWDmn$Z4IlF0)K{dHNWJLsW~IQPla*?q z?tAl|2$OEz8=v#d*V%hj1zbyh@TD6egK*^xltc67uIGJHLbE^S{OJ&t%*7=qK^ARq zX4DPJ{n3Vu!zg|W!-HM7PR?fU2X-?M+D!59Dkd{hDZjW+#Jd0dqgeGKLme+XAOor} zI-ZI^sGI-QhJH7E{DD%9bJBwE$=3rQMQN3@N9n|ZDujFMrDZy`GP(e}Am74HX2|xG z5IO%w zzPvuC&S#)=kXi*W)+umpP8egdT=_Awk?7PynP6w3BS7|jV^S)gUJa8WVTBN#;(6@N8=#R2KRb$$my^%2 zI$sz$5<6}0g4Kzh9+vD&+O@(M_!!lX|2-@X7UoGrwLu`)E)qiR~$$-5h_Cv zl0~na`3a<|@`)(U&7jE7)*Z}M=~Qw#u)4MZ)F)Ljw3s|xr-jHZc)HiZs4XyHwWJz; zcvc+y0ceFmRtT-5y?e876@#t{s1s)2xs|$ZO~|`l;`(vo1-$=s%+jFl{Evk)kC!a$`2hm7)28EaaGm-m;`K_xY` z)ngz|_DkXj6MjOp7UJkA7^dy6!r8%E3W7;P&U0eJ z+^>ORyjDYZdA+PX1WeaRc0Hq2nCheo%sZ|Z#mO8k`71w{YN@Q*o2Pn#q5}m*x*OZ>ooOMn*U=yWL70vH{ro{z=~xV&2Z z=LpYRE>e&fIocTV2b#(VB)!P7C!7z={@VWy1OX~2N_{5z-+#835_~ZQ*+}6F(ya?T 
z8X3mg>Z3i;=6M-j2f(z1zr16EE_T|A_=hWbck%NM48OU=IwS6EfmH73E75V?>J3y;U2c?b8U8 z+nZ}rx4BLAt<#H8LZw2#Ny317>pparu)e@_84+X|3hIDfwem`*(4S4OA>;{+(g?ctySF|oU^GK1~C+~d*FA>$v5Q&M>!ms5i|1Dwl9vcw+*G8(l%YOLyTFV}sG znTE7(0Oi75+5k!8DZoU<$2!LI7PKbycf8tGl^8+{!_tjvb*Q(YBNeBUV~N0arA?0~uO>0OlE*5UG zou~KXQR1q83}oC35`{~y9{6IEtFJ|mQW_Tgn%UgkH^>vTi%tCA0uK7tUh0jQ?DI9p z`5D|I+(81%AV(s7#=3M5(U1YOAZeh(BUSQrM z#ayiT@KZMh=Wd(CSt(>ACXkYt<{1znlY3`?VVTl|DZ#J+GJBw8UVGqPWHuenj@(s{ z?`M4%<$ZcThQN4;bL&#eGoZ?+xI^e8fF`0PA$0k=>WxhqS}>LnU%GJDtQ6EX@NFcn z!9d>;D90(W!d~j;1A=(V%?jlW^ekFi2YVeV9sOmAqMV^kbBITd`p(I$Fyh*zN-w=y)Z?5 zd@8z0${5n$9UYgkeId3RGa1OmwODvtmgjz~qc(9YL`WfE_!TeKHeUtFQKt%IkCGuX zMB3H@syEUI6RJs?&q(Nt_63;;kGpZf38z$?b&Ip+d1&fIti?r`(>;LX!o1-e>r`J? za$ZHhx`l%HN!P5Gqno{~IaecN+d&Uj5C;8*4Ta@kZNL8Sz)naGp(PXQvZEP@q8t>; zxTGcMILEJ5hZ|yQtE*(oEIP>96&Y=BO1ECIGSjm3CF*aaC`XJak~S58f}xeJdAzr< zpt`DMC-)+aq&sKN9&SU+zdIQ=;6C{$=&fAcp$A)s87?S+<)&^PGFv4-6 zfz9%+gI{1D0gd)0LVl%bplO4^yev{UuYNo((ReQ!oakmuNccU9>JE;wU(Q?4b^*xs zOXQ#J5!7-}2z~p|l}E`=ZZ?NSXmBUl|7k7anrs0Vda$4)Wr?1K- z&2<2(cdfAi_G^0RM;;s(+wCg--!X11j1%PM&5^udnm$u$DpagHv>>t7Q`@N@6ndpI zk#JZoyS=rhe)EYQ@7a)O+Adj3%qMywvhP%IIm*Qb%ZtP1qeLRSHU<1@BM&1s*&gyS zaPZ43Mk!|l6+^}$kN@f~1(E9Hm? 
zuPfyM&lFx5X(}?T$Y8v73hG9h-YMKx$w{g8_S5tijlGc4ByyrDRlMYlS$qnZVn5&z zclA_caPAe`ye}hGCd8YeRT!DssP*k^z?p%It+es1NPeE|9Kat=VbMzUnD>&kohmC< z%@Ow2bZlHg)u}Om4od!}aIfLc0h9B5|B0XGx*%kde-zfqrrr?SD!OZ-+;4Wwaie){pb!GX^lmKZu4uD ziWS#;jY^Mf_7Q#!01QzbUe@~w05tQt#wS~quA5^;t7KVdAu+(~Mu8CT^k?#xPP};} zS)`Of&GF$#ZGVZXHAlcj1{iz^^l|aGp82f6O=yE=_xdS(r2cnXaq>U=`1-Io-0?d* z1f$TDi3VSE7eAMX>eO8aTqG9lb@&X* zG(UGj068SfwN;>))xBzi7fgBuZGg^({Y;HKsN?v^~=W>8&xUGdMPs5FNP*-$AB(Mhj7r>dUBRzBq$|lGp;3yNo@5A?)c)Tl&~LMsX;eR7HCt~|rK0Jg zv^OId{46yw&}Ra{|G?+$za#>X>M(xGFg{z92qit9CxzGEgB?q=Cvt75NN_$C1qD{A zc6tAgZv}D8vhJs0xlRG}Q*!43Ey5+eG&r!$xjteD?9~J^b8<#%!XbgcIw)|U;+4SF zeV8poIx32c*OOb^baQ<{z!YRP^zxonY%+)c%ue5s!kU9~)GiXnRvHj@W|}=JUq0q_ zWmAd;g~U`EQxqZCRiG-38cTgRW>b*}KEjrwK$UPX_kqM$BJ`pjQ zyu^^&k~p0PQdcjaEmUCi8z$%O5O9)m>kD*A`WG=+bxfMppB&_3SuaN5y#4@iQ9qchJw|2K#22w?D)9)0-77tDX@K?6r6;+)R<~}5WpklN zDx)uy#)`&BWQ<}C)I^Q5pl!Yv3G&6kb zj1kdoh&U=D8c+u5Mk4rZC*<}H2Cb@1KiZeG=Ns3GaO}-`p47!y=CCWwP0~ls;>so&ri#nIJ$obJG4XALy z^opG!TZ#)}wAdub>Z@K?XbgE?c!cIB#CWp5)Hx1X>Le8#46~LA5_{@+!@*oSuJ^NV zujgU!p6o+N#~n(&gP5$Fvxr&$gYCUSshe0^U!r>z4zxJdE5PitSXI@F6My53&Q?!- zvtkN;^({>N1v?jnXyB)DHM88}?7Y_A1_K%iU4VBhU~yZ1PlFN4Lfs)4+I7}=q3#7( zgDY}vp%7ziL6Z*1%v-y(gL|Q*k9!m89xKE3lrx}}MdM z-h3wrHI5GTleQ3N zOQW-eJ<0HZp*oOUN z&5ktG;bQDSg*;&)hVN8}9Jpk=Zu7eBMeTJi*svkg^ry z-^=&bO-y=}3wD`TkL4zoV9MSBipLqlaVRx^Y?+BrUpn^fM{ z&mLSXd+tb_3z0lWfyfV(9@X6wR4R!d&@Wv9x;M9J7xb;O8Q|M&l9ATdBlYm|A?93! zOZ9Sv(MDZ2gVICk*0_3{SZA-LG_#s_tH^3^9DJGXEisiw8QWcxth}|K8A~9DBGk42 zKqNKD-Rw0|^u)v+YC`FK^evB1=Kzfgr&g#Pt;oi4Ocf>V>+r%To+BuY*hg}9$shT? 
zK4mr|FQx(Bx|F~m{V3a3DuHns*Lt@8f~ETb9GXPWlpE0Uw4}=Iuqv^4D2=gFW_Kwh z{j^7Cv+>eC?`H|VFBZfZxF5-xA?}(aMR4#1$P27`^-7_$UjVp!Iu^z(sDVC@gNPa% z3W}|AISJ5Hk?Zru!!ts)2GDo$XURugS&`5@V*|8qeeQK+tb8iHNFtT!dJ*zs^a8m> z87i^V1=w4W=8xH2tm!(ZAz*4PCw>A_e|ZR@6fK;N>PBF6+|tYrWvj-^Ax&Naf~4lz zK(_FM_&CG%>7w;D3u@c|bxO_DpK7ZDwV^+ur;WfMZ8JwQB-eit&9uJreqgfS4a3no zx;WS7$?~ktNUWMQnj>3Oi=={V9W4jeEKi9iYT>gfq#t`gu`jtE;6CXvehPFN?q^5V zqi1G69?CozXS3Y1$`304@G554_MM}lPlXvxdx3nL0pyY!;cfF{@Qhx1b<6rgP?`UD zRj5??W`)fq%u{96s~c);j2wtE3#~<6@2;>PeiXqNCZ`GnNlqz+k^59Z=sqU6fhb$D zWjv1bx2>&=D^Pja^M$CU_^*vKZw+E6`9hpUVM?6eaTsR1=@pPp%CV7$yjdP)3jO7C zWhv&uD-qgNH&REk5+}hH})!VH&14`{(qb%2g74Lerwecf@ z1JZbro-A_i5(3QKMMy99a%_lDq=$Od?s0omM^`5;d_k1KBChm#$Fajrz z*M?IOkxafBlGM1&aUcbk(V&fVHqep(VaGmuGxNuPZPjMMZCw!^;( zzcUmuJzc9z`%%`r{e04NT1MbZkRJ+tFPa@*H5Hu{! zIk!Yh$%`eiZ6uShUVS9LNufo<0||Z?mp#;V8KfbaMT;0QZX55HEJ|}r9NUwOrko2# zb=C>R^cR-%ajso_NtUXHe<Ch)V6u8gF30@=+~?K6uO+aA%@e;_*oN>bpu^*wISK zk9LV>%DU2uW&s^z{8LKAN}P(M$q~QB$UtH06#7ovu0D6SD6w@?WsCt|?7RFh@00J3 zEyc9E*frpq&T4;}%03!fz|5mpmqOOaPIIRqkGU%s@>qAJau_}I4^hQYjQ&ER%<_YC zk&Aj`3E-0_xci-pg4nERIm@>y(v!1A{ad2%3UHekKJ075>PRo;Wg~&L%bE^l3Bcv> zcIpt$!GHonNTRbm#{GSV=^6_~V}m$Qq61aR#K$we6tQ-B(U!l5clyT;ut}l=OjnStKh8ViDj#TxTZV$Y#ojmXk)8 zXrC%KptHUk^1Ht|j=6j_Vfo0D;-xJ%iPP6*VVq&)d{i~u*fiKM4dOQR)Wq3={KLM$ zMJNlaEv~*bD9KndX*7?EC0R#Ja$*Y7wUr)+ z%J&D6k=fFLYjIPuIYAMojXD2%>5>TB;~Q=`2L)+l4n~0Zve8tI`7w4FtA%4165s>j2L|x@3v0pl4s)3 z+*I7jNSFM++4sGApL4gFwl>yK_lMnSy~Kn3hb)?9oQdU?afmcR1%4k+Vt86wa_07< zNs4je;JI0jeBgDg->DS}GRGXH%(nBl5vpTsOy2uEnX)ypoez2)%swY4Co9H|*NHVJ zqN^3p=Po(-om0$)RfFnTP*5uRn0>dW+G#5_ zlv;f{(ffmJ85QWtiVMMg5faH5MN zm=DnUTaK0@!;Ae4%eV`=6d8Sk+G47{!`}t`iPy&8{yc&3tB~s3%H!qc#1}|s-#ik? 
zP^LA?lg%S5pAU#CR;HOuDt?W|m)s7Xo@*3eGQR*XIG(zzA{Z3_`9aXI6B77JzN8Ha zZ|!wa)2iv#-p-N+t2iIp-9rF4DyClk@P;nHSefMz@1BhOT*X(SKk$I!;2z{k8#Xu^ z%nf6qN#*#cO|oJdXVij{(kO>wVmdI50rkOH=_cc=|u*!1ahukIStE!SP+ ziARmxY9EdjiCCYnCgHgIz+DNg^dUuOhu|BaZ*Pgn@kFFFL1Na{p|U>Z9|Df}K(TS7 z(Rz1@{rsb6Qyp=&gN;8xxy-I7TL`iC$kw4_Ki9<6(`=t8lOd;(`l*zIII0t!d%BpSgdR^)jGs9F5%UCmaDZGja!rw{D=>HE>3 zpC=!dz^yt9Z;#LZ{l5StO!XtU@aNMW%6;4WG4$lcxG%rEd<;ccc=s+&EV2H;9n}y! z#@F_rgUMefGiV45;A_Y8=A9zbN2O|rL6?H)as0?8wt04;0l?4pTAFb$Ij*J8a zsY4G{%iA4jabF^2lk-WdP59#GoPBd38^+7zIM+O??p1hpx%~5qJ7;SlNG1`t8*V2O z>nD1-%zngkST{!;R0@>7C*8BMG4xgCq`}dpRP={hCye3BHHRm+nw(H+`j#a zVUGu#cA7!vW<|H2J~;DrEJMliPX0d3g=Vp~s{Rbg(?hH|=($YgTc@xpcQfMe0bdtD z0ddN`^w6|wwryhuz3Dh_UMgD+TiK22s`BUORUvi_ApGhbP(4$7XK$|Q}TMVC+ZJ69Xgo^{O&h?2F9GSx5r^-rhr#E!u<1MWXZMZ zo%(CMHEBq3cMKWEIgI`gtxcQ~Pt#*(Ixp*JN!EOg1n-2#ClwGaI$pae4RNFQ>kZb; zBSGQFqZlKe4|Re+M(YtJNW631%$J~kk&r`hm=3(xk^+K$%apdDcgsuRBc>KsD{@6Q zWO6iW>7NBf81}n!O@Q~RR&6Q`e*+EZYMRGUy>~BjDLWP2fg8THw$b1{k9LC6zle?G zd4G;V{*f%9;$+SF_bbcK24PRw>GNQX)=Mae4xC<2G9A7V@9*&17UoMz7>iTJ%5!qW zRJSufE}*Ti97n3Ux;XLB=NG%@NU<7t4$LS;n29&r<6^as3EN}ZOjY0RckM`n!@@Ha z?_xJaM>kDiG1zIncaS1fwLk?1yx^zcTr`qm7{Mt5&tf5r|($pS(TBX|`;b^EBRW3ap@U)@bo+pgh2 zx~aVphy}5RukQHr5>JHo>T>gnIXYUUe)WdD=l^t)h`V0&0GmLIMXN$%+sR?3Mp_xO z;1bu;wWIMVsIlB)s5j%rOL_x;+x>B?eQ6O&u;H8XT#DLpO+CJMFLS<)0R3*cnA8@t zsw($olI7Q3d~N4F<8{)~yEmYEo@+rt8@Cq+jT}9R3+dTQ20kdV=TDQd7oYOeyWi|H z;)DP8p;83Lv!`8YvNV-?sI|6&m1>?qrjA1^d-E$>y^U_23$D_`_t{4Xw;wuIkseD( zT<}_tv~kh1x&xqcWqcly7|Co zk6>iDf{dPQ*#=9vDY}j2$SgV>He8(0}Caf2O9)F(( zk*&&8Ad%Ygm@1wtKV8^^zq>nqUvYIfJ5g|jnTd(XG-?F?3T1mvPL{xr5lPif<;~DK zP$MskS(8Xtm5XIE4_zRp$B#%uxRiulzT;B8MQ?&zLb1-JM7zj<^sL4NrJ}QOQnhvP`Nc{E-WEb-Rr*2j?THz&*Le}E z+TvEJxq4?-;#bG!svDvY+SR5%w1TY8QxWP|_bq<(MZdc5y9;c4r!UDwCnAU;FG87U zQFr`+3+1RY;SH*LN^bHw^f*Z`S|qM_@q8MPXgPn`9vU7nN!EKaDaWc~9q zvt^R-$!a3C4%-I{n$(F(j*1A?#(T_&O1j1$O?O>Mk0H13B8(>hP>X@;4%P)Pc)?Q> z6sx}7w=1Li{-GObPgsgD*o&o+b4C$?{-d^1qP#{GlA>&Yb~LV?_o={AV5N37HKjX& zW01Lxw(IDOi5>nWkAj$;POi-l-8P`ZYi8Cc_z 
zm#h>Q>QYtWQHqzk@mVmMge2(4KqjxEBPekat@xJUWJGaofbQ?EzX)@rZKv`FrF}Bk zYh2j$Z9}u|F)JhaRJdqLQXdK}M&G<8CJhz>+Xfv`Y5=IXDQ3iK?&wL{r%)M#``@$^AS0yB^Yo(>x% zR1_N8b-GTjsNh#cBPeykZS(#E70d1s>dqaks9mmvfZI@*nl>m*Fd$2v+qyr3_woH3 zf%;4X)-$*>&uukSHqZeO5@mmJw&)ah9!mWX=z@!|7?ciIL*`Y*|KsT`1ETDs?QIwb z7`j73x*HLs8)=d5?goK}?gr_UkZ$P|grQp+B$N)N5vAe(JeWlJEs!VD%ROCVu7g{r=X1n=L1Iw}BEv{R#ZD-crrR0Q)i;oM zh9K)^u&%K?GZE|FisQqhiP`vyz?$quv>HYQnbox^q()!|jw7(bAlJdhN(o(Qb=K~J zpVDLg1Y2a^3ScSo5)hXJ-;156q6hTea#ZuvO@Hqt+%@o~=Zb?UK%D5a7D{VraqmV2 zzKV~GMXRZ_MFivLL+1y%iWcukiC}C!E3}#r{`_opAf-cNl^DjjxjnpzbX`tEc-eCV zY)QcH%vM%i^9I#12p!sLKb&WD4+GV&k7Do$8J3Ujuuz>9O#8zZW>oB=H+X2%aD#j~ zV?L5Aju5&DI-4q(E^j#B0*%)(wVVkLz7v8h6m}KH0rWMnni6>xjczP<*Tes9KZyJu z<7eNb@Sivqrrw%X$CZVtzaQqu1>Pus5;e5dcL_9`xh;r%;ij^YtbzA>Q=g%Uu0n;H zGdn5>k3|Z`LPolc(ad_tH%`+gV7mK^!D)mfQ{1HD;ZsV(#+8hBi-VSTyWc`_zVwi3 z@p~184H|*ZNga|p{8{PryfdrT0;oSdzR{RSDdo$dGt#A9#JF4vv2;_rF3-5D( zsSB-jTsoXQ1gkpk(}>wdvfF>BuNniey?Q=j-f=Avs;ocTdW!m$xn02})}#KbSGeGQ zs%n8G~NnIh3d1fom>E8VUnKG3znx?FNB$Z5zv2C*PIHKdgm!{3!DyvC4&jKo5Jt1_^wQ;G#V`A z1w0Q)^mCvFGQiz0jphr+?Ir%fB{!Yde!va~L;|ko=nI)aoa5c=P1vTE;IXs)H{?A6 zx82tVgHGjx$zYs^hYN)PXAT14>m?*(oSqsGE=y+B<{(a8$fMD#v8`XWU8;a6wC%oc zL`4#0e7!X!B!w3*e{hluC=&BNfZx}q>9xDw(aoO7? zlR`;rnbY-lNT)y+sKla>#H~Txz_BOa8A*9lF;k$HK;NT9Qfdgc#7Uz5H|1G%I^Xu{ zcX5KQ!-$RYrfsN}pUS60?o^Z(!!cv94V3F;!u%lj`$+;ZL>LwbyEVImk)KO6$d=9T zEy$@7zeC4=)SS&j?IwlE;3Ti7BS_4lA1MUWI|}L7*hUXnATWr(nKVWd3O(jSq-Xy< z)(!kTM0Sn#Eg}zBTOr?IkT_k`qpscIlZLCQ6CTe?@9S%anEfD>|A>a^de9ZHDihfc z?@hJ5dv~4*892TnJI?fK>H9^|C7B?69wkVDt2?z}w($ET@X=QZ-CII4Vc;SEv-eT+ z;x@1Z#g)kk579xmvZG#6t8F-5`Mv(jfPQ_dHst{d{%b?+01)C(@;8;V zXlYNr9ih{D*6C|912yGJPEa({jt<}pBkruC7Vo*kUR!!2v$q>T%eL4=5}c2e&Z*{4 zoQPIZ!%LaNb@245wcXta(Kxh;P>&9Q4HHL{Ii*7fX@!x^nvZdgn*9%6wp4knl87`c zzFVIXNe*V$50+y{GAtiA{JADWK35Z>vbg7-^4x<>WtsxT3NX%pv;1GH9VTWN<+wB# z{U?Mb>-u8iN5;B!@+(J2*KJitBIS$PCbGL$+TcrdQm#MPP-IYL6)8{7pey! 
z`p_7QcyKE9=9{9boM?5+e!OFDG&i2h<~>%%D%P+@t}8~JTwoHevH78Ap?GUrj6+A@w|fie}(cK+yt z>i$`T%!=Tw1kK+&Ksb!bI}80IA{dYxaCi15SeKe63OsHoq1fn!VH;UeFMyS^&i(NF zJAth?f*H5f)HnzC=YH+g$;o=D7rUmXJ3{_v$vH}C41A@?rQ&)iUrvuWGYTFOms4v488RkfoR^GVfuw7|A8FcybWaA-c|l^R`$5Pm_&V`#zYt6MBI)FOcEHJ}D+Cw0H7L`JHdeGwZihpQXE=CV36xbUnwdjH?N| z)f;e_X|^C5$_@_i{rOp+KTE{dv)mBXIdpPSng2&IjX$-;Ho%7!lfyg)qbB3mQDoJD z%}0}<47BN}-@YEybQFW}_lemh|1GEfGpqaV_;U~4d)p=q;bUw_z3j@tCzdu|h516L z&}NY{8&ihzgZtW*_?cKuK{q|iDnXlI=uFh{EoNbg+z-n#i|gD$Bw|CX06^ooiVV_+ zkivN}tn3RUU*48AuNtc>zG5fE)`*m~+MXX1h`odoxrq0(+c0(}b2UOd?AhYuR4(;V z=lMXFqp3#(Th}TE5zxwJV=4y>U^o|jynBBQeC8E2%kXXVs)`{Nhx{Dilf;MV<7hpx zxzex~mVyy902C}rvnOs2fXvct8Arp`s|6tM*A{2X`vuBq?k>Udc85Q z8z~skrQYi-GduxYXKlZCU_Cq8=ENmKA9Q=Q*?znbW(BSwrlLQ^TbpU0{95{2i;b5| zX3jy7q=tOORILt-pbe&d5vQ1ABJuP%BBgc^nMnCjpt<4=_uQ9(*xW0IZfNTq2}2+k zeQzH&;-Dg`oikh94X}w!D9)Q1IxqxzyvH%Pdb6UwWzV&z0%)eQNKAFesO>2 z`Pry~@>ugKG+*lFX6#OyPveVS{L{YT563GW#?GQLyK*2lLkDrO`EOK@?aVE>UG&bi zlW&8R6PrKRx?&1~1JVe|_2xrgqN8ms2cDgQk_scTys!&-i{6B{JTx%zSK7CSBk(DE z9RAw#C1tt@AH;{M*8FV9vqdQ0r~|ldkya>5ESbS8z@rY5M~mBA6~&G1dX6qTnaNFy zOD?=<1#xMtn=3>5+k;M))MysXYxxpoj05E*ZDyp`k1~`{f9cF#leNzh#Hx{Cv(EIM zzcCd3s^@qK+as>5D2VG5FagnH488A~ojfy3#4UT-+TjO29Q;c%GS0u&J`Ohk6A|ug za_mZuIQEKNuQ2>EbPaDj4v>%5ukHHc&{zk714)mnyq(=P7%7(LBYR!j2c$83j$jZ+ ztPB^Mn3#XwZo2RnPyuVA<99La*pN`$G7!cq*N1+Ght=B-#1P74xA_#P7}Y-GWlu*3 z%4_iL%Squak38Gokv&2=Usv&td6hS|u7U~SxG%mthA7s-H$6V+kWY-N+K}sxS@50m`u`CKuYmx*vp-$r+LtNn@3Xq+b0#g)wrEvrRBjGT z=W}{iM&+@TzY!)%*(K4Dk4fOP&<10?CN`a-p0}vac-el2k?g}Re_w>DkT94|)v21e z`q;O(`eo)i0BgT5D^B4y*XPp^N8hMQ*OXXP6LuCG$diLn(uJ1%KjGe?Cfiv*{%O+B zOVlu$)A)>JKIgw`xt&JDWBz));uttUy*or-Ba+Vh2<%zTQokj95syfh$r}w-aIJxHE4yPSX!M=>z$MSR=`t-0m_>yz6ZG)EL?(=$5ld|IKO+Db{s`p(!8kWRy6}ls|Rl5 zO|4p>q@xcBc){zr0N(3Q0)eip-%#&l_ zITGl4K#}#QBMuNCQWwDe!9DH!sucZvJq-4prHN{k0J%QXwLh+P5f6D_--cV<`il}m z=&SLdo>Iz!+KmIV)>DW&*Lc(am=1IhXu@1K}?w~G=)dEC37 zXe{W~oqyQwzx|CxLg7$~){dZDTm^5kQi zME1l*)DI_e>7gR3s_dTCSyQc+=b 
zPeu>WN3<{=xqjXp@|$2pIAT?z-&oljlqVCe#rYEVO)vXxqJZEB_)~BhR2>laN?@X7 z%)}A-zZ?%UHA6Zo3rqkOcMbyGRa_fS`pIO>w4l^J%yI*vU^>fYZc(=0)T`6LqZj_%(@VRqgD+mTF zjW8kd;;QzUE&M=#mksD7R3R$-8f94+gDRa2O=LE{)upjZyyJzYxhT?f!RnmQWobO4 z#Dm3J?W$F?68+3AaUFTyHR;j*_wrPk2)qQDO+D)vp@FS7cT+>JfQ1tTnALVoj_;f> zg8}ME?}g8Pf%vYAL8Jij45)iWXlVaCm!x&ugN8m04yKYM@ql{2clNI>+T30c;X}Vi ziorAsWBLujXS{jzZ7{`vL#5^7$NC@_296)U^?jT$r39H$q^f zIBX`230x_4+2KB20A;1t<>#%LY&H;6@_f}4A^Z=QZ^V)@R3sCuNd#NKE?R-97d|B) zTIIs282j#}FA84Jdb}?Y_4E%eTm03bCET{&VC6xYaB3X#QL_8<%e6pG@EX3Hu~{BY#yxb*_CAb_pFH>`g;CNd~+Qg`0mz*$)~IDtN&*5QPFy%;Jme z1V!MT+xeIsBflHsohtu*IVN{|xn`mSxCA2<rX=_f-floO2%}hix@l_BBjiVj{l7&dIMCJj4!@feAQ$q7Qw> zv_KLxsobDm_b;)x?fP@UEmm1AZt>W!)Z3Z6bd&gLn0O)=j}l0$V5fQ$Htp3HKSgC_ z<4CAaMZ&y^c+Z|7U-al6hrk(M!_eE}^yA~Mfi%Gi@Lk;~Hf{xq0g&Imc9v*dZ?&b99_(^cQ+uv7W-0j2ZAWZ`*UXAQMpF77HQbOy*o)JZWOgdHotNRDy}}(cWP{X z)neO|aE$jQUuNeQ&ZwW8KpiSP={tPE8!`3T9KM$trt9qZPa$}K`SWJAn+Xm&T2)dQl#@Y*{nw{=Vg#k>;18#s8&<=)~xf?dF6P{#~^ax=Sw^Dpb z&L^|yhc!$F*k~#yRLXlEfmpXp+;da_M}%CU6K(zJD@YPksxeZ43ey;DgdJGHf+&+AAJ|L&6>Ldv z7{w@5sDAC5{<=27i6kza9j}=RHa(5lmR66yAph}CC>d3f&4ZiulTOX^lln~EAqT#3 z(j3v*&?xat4VTa@ug;^S!#`AY@c*DM2VG_sdpiwxW)^Ez3m~_zJMuf$1(R+xq#r@o zk+JI1hlwagu=+;l7P6O>#Fd2fhYo7Uy~!2`#?lhUU~kfe=$?>Xtr9th>MvG2=kul2 zviG9vW9Q00hIaaYvnRKUJGFa^361KCKDSKpRCXkit;(_~PVT1k58etRGMR=za%xj3 z8i=r2t(>{YM1fVl$A?yF7oV%@4?ipAANx$cH>=uSS-p3on2k#}PmWhf3w<4-YO&8h}B0o*Wehmc#_bSy!q!+4G-}} zekcYFhDCQq912ssyebyg`O{8UlXWm*7q51c{c^BYvYkH2l9Fhxr)$tg*nv1w+ zncjrjcsr@T*?KNc$IZsGPgmwTF|YUu$7mfPVdN=?Tt4-)&Dew?B?=*Qgz~}}w5IP$ zqjjdRA<6n-)|}1U2a*sc8W$j&qld72F-yRzbm|LtmK`{`6|D^OkKPx1>%3-a>tRsJ z%pY$y5kyRTLANbE!N`Rm6_x}%WGGrTL(AL*8okY7*DHM$dDL}a5h{M|Pv!XtI#djov(OQ|>R2M?-^-L1 zt7n)e=F*)-gdc>K$LdAdr%xqjpIEtV%Ld7hF|Li?lbt#r9?Hu~^y`!~m6J9547e(=YgDC2&3IdGe)Mym z!t@p5qMcqfa^Kw%BTnU?q0vE|^;0nJRf==1-A)ER(1I1UHuoJUz3*itq;}p9NbosN z*?HKYd@UZCMMN@j+U9#C&vr&s=Ro;c0R8L5^JZ^S8X^mOUH|r#nv+oc85!Ay#PZtW z(K2dh<2t8upr}wD!TXvGpMr2s*Y{3P#%;WsKW~_{Z1n49xWkBH>~&j#AnT75n^^L8 
z<6J&4$FTb^{AX1`$X~^A_JZsqyZ@?c%m?osWnXB`CtM3AAe>~JI&Qw<r>~5Ujiu|Fq33JSL;}gEAc=@ zbdNjzD&fv9M)w`byndXBT~Fsrd@`j@L;O>2aoqy4e^OvN;Qec$Q^gS$FMll3dsVFotPs~Z#cyikdf)~Wkg5@sx{ko`L z{{)anx8qfTr}Wlbt?^^bD(#NzK+o#BFQn7BcMTF?wbc@pX>`en8JOW?pJ4rp9Y8nI z-iiS)fp^u24jCi9_#0r#`PXF>ZApploY~t$hgIsT^bh1OzdmK4zyw=Ol&}gs`hbZ) z(gitH05747Cg_rk#*8Mln`}G;Sd%~B$Z^p9I$q&T>4>FlzZlEl%m6>PV_*SHd@X)V zkBAR5VL&k@Fg-c2_SMbo2c5KXT`5+d{6`n_=fj)f#73&<2S6j&qYwchhbLx!@Jnyl z6aA(C4FdHYqe@Qsct~#NRNX($osVp1&5bGF&Z|q2a7%G)0cX}nWpkn-j4I#SHs9{; z1*i1C7RTQp3;J>7>{gs2{v(JlskQV?33KbdMd$>+%%AXZraGwVX)G!f?#eKtl{MW=W~zE z->0?}9TVYgmd%%5(lWZ!SV1+-i^w9DB43MtM?=8$u(k}D{*9|1V zika=nE4!0xj_RIaiPpFdvVp9a9qJ5~Cf7li=f(0VPXM)$5nhJ>@ZpUoOImjz`(_E` z4?xSsYxcPT6X;XAC3vd+*hw(`r#uu#0;x1AAR0U%U-gwx;ueNRhI$Yw1A&3Z4tD`~ zVMYLHVSAgV(Lt1Y9oRNQwW0DG%WAtngPyjg3_Yi=!L{f&j~4Vg&Q!_tyy7Z+`iex9 ztw!3m4(jw7icX&qMn5QoRrUgr$r1<|EOeAsR8M5xQNVL0ndzoCc;U3$+pr>qz7JY&E=b?!N&#YfGQ zLCAdxCF+(%dpY`!v!+6MKQ3=*%ZxS%NhcOWlHQp_v%h(v3^~IWf85`ei}7(}F1s=7 ze=FK<-OK9*O8W0wud~gT&pQxPBuTXKx%^)R@Z{B7cubXf??j{I7|31jfbm;<73YCu zSE%);PZc3~Rv9)b+=yrwDIkHss_7T_Evn9wXl0kRY6<`*Q@M3)04Xbu3T9V|YWu!U z>1aL3e^p`DfQIh_V#4Ww#|^8-tCr(JIshqOJgqPPop6xxImlJoekAm;$P2_dJ={#j zB;`$fqS_%X;0Qpv*f+-2Z8*+`FngQ_VZ0#k=8;^tWyvEeo{hOKqXm#_=8Q4|XrV>G z09e&*3i4?T?{~*hbAk`3k!mw$t94(Fq%bC=avJ|^;(>D5^bxFR<3 zgFb+MZL$t0Do5cCs!%NmNj`l-!hkW&g1{6H;~GZNZ9<^fwHgL?pC6d&ByM-VN`zO z<&iA)m^H1Pl@uv;R;PnSwJuA^2jk$f4B1^hp+5NB89s&mG@(#AxF0QRa`hNA*}U^u zQVmgZdHlfr14OCQD#XG*0Z}kTkC2sYJlNY;%LefP(2kiH?`=YHyZR}-tOc9op7tda z9JI;(YvNqE2W#DdO79H0%Hh)eK#ncqa2Rx;;0eJAC#G(ycQH8uTs zX`Nftf4z#9{ed+M>$U|0He;vsPN^Dh34L%dz<|#FS zMwt^g%%60OwE74X)p+-)4gUff4pPx9lw@%j#Au)Lt7;^Ua-Z@O0xQp>|56G4x|4>a z$?P8%-wa6|coyYwy)UNUJaOwyiBbB@)Ld%9My7zC$`0t!@4f|*u$g;`EQIO4rdZB| zC{|z82)|JIh$A}nq5uzh|5T!HoD>f#%AFA%j~!+Ytf05>Wa-J}J7V-%2T%b=4MsL`OGj)*{f zK0YM5q>q9&!AZBS4)Kbw(yS_lta%MkLQ|M8^m!j6WIuhReS@X`vUK6QMo#=>oTQ>1 z{yQ7jZ~ieVm`UO$h2|ydbTzXehtT6A8pT6=Y*~e}0K8IWtN)=p;Ws^NV)jAW0e%c- 
zb*`7RarAl)jy$~sqj>Zqf4<7WtC9%5>eX7Tg<8e8j*qi08F=0AL#guBY~*+P$;56Z z%>LryV3~ZgY!yS~6~r;(bN*Rfm|Xlxd2v^fi_EMcBFHm{UArcSfWttit~7R*e1qDc z*=AZYt4W~VZ=ng}Jw1S4c2*~y79umd*)KOPj-RDxc`vE^8qvRl)cmuRC<_TA<4@|A zrar&x2UVDCR~z`rQb5>?`9p+Zs1}U*RNo6!MGbOqHnoxyR)@&O*I}d#_NY`3Hz{vL zE0%j}S`tcRx!y@lAxPJ4pe@%YZq*yn=ik1uq57IRg-u=T1D@No_6FjEVZo%J6?Yqjo3kFo>+J9Gj}>lH2JZJTi4qXm7_Eg!rsAVa+cJN-%vfm9u|>1tqku;2ueLXu zAL}+A4Bg35ks^k%51IVLP9iAy@bm@$bZNsM-g7V8OnWLM^ZF*+>5IHm_x-=bcCrc9 z?UB`e1#lk5V9DW_vxyHSG-$o))mttmfNd}Gd$`_n8)I*6qim6~UzZJ*DVhuALu&?z z$$LqJ$^B6iCjGX291nXS;Dq#B8@L|7a zbW10Npes2f<^UXpQj|qANG7#|j2sdt>yw}S4b=Wun|vRh5@IkJgG!E^dc6tL-LW@h zzvFC`#w*$wO2o^OcL|y)1po>YUK>;?r&1aOoIGYQC1(H?q8C-h>^ZqOLi`nh9{Chb z080pw4miS9v(WES`HOJ#ueqf~rnssL_a)YV2tt`B#!a7$B@G^k>iQzw>DC z;ZFJU`{}IStqv+`p_oti=x$C#?#VEo0vvCJ$2^xP6CfsB;CqE}!o(Y83kHalk8%`x z1CgG61?b(^WvZqMAz9Ke4$yk2+k1ow#ATb)1u?mTl=It0KE*iQ*Sjtl7A73e*4Tgr zk_S<^b-$y2s@52v2Bo(+3txBP2O=!y4=9-EO=rN@jRUm>@HL!M>0xue5?`1JW;}q6 zRWT_YGdGtL6Yz3e7|V6eU-Tw{=jM-6(6g@#aVZPw@|`7sf_?|?t6YO7h}`zu?HlH! z>r(%-i)FQc2J=GN+?$b*jjN|tu}eM4&WWy8Ztwl4TNpU==kK^Xif3*$I`J&9s|mmR z$iygQYQczn)!y{U5gHw8hDG;;5IyPmEItxtcBcM{#%3E(8DvUhclj;pYOfSWU*^gN ziL$HM<0hn|yqxB$52;8~_tz;bh?Iv~1p?uc_(=5_TMFUui{}xG4%v`Bs_0W(Ek7%B zk#u42Z?G0(in7yj$7dVK3sV?P2$NlAoGDKN{P@xN#2~1OWk&enjNsKf1_^3NvP1;x zc`}kEj(HSn8xKJYS6m$i{g@8yi=e38YA`$W&0LmVu_{Uq?kL45p>lKxfB{=jhdygq z$D;%Y(z)sXD{8Q8J+an*OyABa_-L?3Gu+)lhNu5T7|eQys=ra$@7d?#j2 z>fKz$*G(~oiOZLi3~+wkF&DsX`wj?;Yqyg6=^<2y%Ig;{jRxgPp7rRGWVtVhtKS+! 
z7Q9NyQvE5f=XaL6WR}EIh6NiyChq$FB(T;17hgW5yhi%+PbavVGP7Cxa9+~HRRkQm zl3;azqIjK+hRKS{v$qP6mCD$EJn*1!OtFCGCi-dVWYEj|{rvr-gJt~`wBoZ?_VuDL1Y7hfe|vAQ zs_A04`P&kaL{DFr`O}du+~>HtCHRK=GzT!Xk*8iZ9bD^@T_bm;duNE7@#ixlRF7;5 znNLC;QrB>}+&o5%ra1jk-R*o8>DToG^Ai7NLt6|wpH{ZOdxkCIWb*%p1uY8GcE+#K z-E0qb(K!6-O&x<3F}wV90r3YCMHxKP0St?$>?fUnRIdI4K4EWdz5Z*z2kxIa94_9= zUwvCo8iWNWsF!X`L?RUN4f5~5=sGnxPV9PoZz5oh2N~P!)ZS!eU7glEXy&=jWB|7; ziw<4ld&B=OLtdQZCU@sta}fJ|8q*v3cD~wnFK(}BAUvT$L_kok#I=-Z^2#?c)|n;Y zlXiN0W%m2+U95@40IqB4AdZ*O?cb?I{RoIW^7F;Hb-Vo9l`^>`m}vkaDgEEk3Ttc8 zu$3;_1ej0K%<92e5D!X%k3dGUChv?>GL`#irlxQ%7qR5yTp}qu13^w&QEzFC@4T2% zw4=oG^90M*A6+2~)`!3C+)#~4WrL1QBRtAECeHf$k0u$vjm7=0wHV64oB?KwpDq%` z{@FyR?Cd6Sod5N6uo<+e5^w8`25Z2MzD^ z>02GjQyy7V$Y(>28+f9Ts8wiFHlh*c8ya|y453@iu6q^86*rrmofp3ZO4e2Om$iPA zaSoGxTC5@|Z(z;V*Ha}P6hnh}=*l6WAsQ%baAeo0C1J=YGWS-#QgbmUzO^8r8B>5O znU1K&2V!1c#7qv!%k+(W*;Jfp{DPA|Q>gfsgdsAXL!v%4t>AhZ|C>bz@R@rzI4WNX z>X5I`Y%(s^sg}n$h0iIb}U?s?CX1VL`7DZ!ZVuUqLEX?=?Zkf zwUqHc!Xy|FRjouv93q_lUQ9qjZ;sHfF;X=$!EI^0(GA`P!-T7*=%TpSDHUt>chFGJ z1^~EeWmVM^F!T3IcTg(a0I;O|<_Qs7Qc|L(p+Wa#{76p5`z+?1tHGO7&5Fi`Z6irb@2Ofr5gZ)KGqoDetR+yb%bDlYjGIe|SUxUGjw5fzl9hHWb)=i1NK?c6@ z7p5<9ppVKdmV{t zrmT#~pZ6>dfrtq*3`gQ;XL=Up?51vt=q8Lk`Q!wq8h*YYHcY6?046uSZyx$xaso;| z*j3A58KveKJMiBgI8RcwnS%2@;UoiW;+bNfSKq*-~F8UT*b@O(A<=@M) z3X;KplXQCu^)s$u44>sfKgRj0Y%Y6+8^d&5F^4lF?S)X0HLGCM~2B1UpC3ntcA+u@kW3zk#Kq zSoAfJVQW?6*GQ48F~r1Bh|PUCvFKNCKPn_)#wWUQb91A?w!V0g&X{h3BW_%FGDV3Y z>tIO=J*7q!({EKQZh-LDDH+1?nh_s1ZE7hE^6{Dne*KZ#iJ3R#L?cM0K6qPI?a~+gLArKHba7F!?_D~^?yZG}&$E>>k z9vhCP=sCo&DqX+?7r|cre1ghPh&mjEM4%`U(qYy@P%OSts)U-9HvB89Rh+Yf&F29=vS9cw)s1!_)|v%J7D&7^Po$uABOjX^_W2Tt zQ4DBKD8=Xt9RJ%6O*Q}pa93!tgF3;mg@HeB!nzFzFuPZsf3{TiRqOKG;F8h<^c`DB z91T$?p~(m`(_@%SNiK*I_xu0p1Sax|N48ZR{zI_m4@MyB&k`9A%bP35yuynd)8Io- zO>?k#ruYWrJA}r7f-u{jc4?7>ked4O++gX|Dp@#^a%gYFBMQ+=R5(3GimDh3n^uCj zD>Da&7QRkXB)t$6{;xk8|91xjEp!o`R8=j%yinZRbj`YKO}FX~_?>+{$vgE*&(^6C zQG(I3#P+rh8#UGln%H~wMfKxBQlKcUmUtI29t}dqfG{L;+q5Avbui#9p#w9Aod5Wy zFrt0+Q%MiW_Q91 
zL+Z~Znf4!0PCg|s-=M4ATZWD={b~NCY<+9MaQfuGKo4KE{gG?O$56}keI;~27qXyJ zPa>r6yW8BO*XI69jW9zdR&iHYSLz?E$zdsZsjne24oFK6Axy|~^n<#lD9!K<4@i@Z6tit#8W={Dg?4U4Ozhl;i zqjK%I!cl@|C*Ce<6QiM_H3VEM>$U@*UGCuU%|{%$;(QaS;WD4r*yVx%cL}oga?RLN zQFi4@_Z@mI_C|`e9R;bYL@va(si!j-NNNY0yZA&LeZ)4{e0k)+XpJ_<_u2pXru?<1 z#V;_0Zmi(L{0EeqUCKxAI_U-Y3oajsp}n&1lw^VOM6yY#yJ?Sj?OWlrpn zZUb@2z6IOrqPJo3cffpBfy!LXL?%@eyd~J0^!DL_r`-j}=kL|y$FT+q;tuB+Mt2l{ ztfv?Y{db%S=C~_qy_l>h1g&rNDQ&-y@U@Fw2OlWdD)8Z@4X1olhV5Tn9#$sThKVHK z4TRvEwE`O#Bjme5vO=tih*=byj$uo7wJcw~hs|X7U;e$}2@1U`%FaRVZUR4nYsXg; z>@839I=~bg@FV~CPQc~B(oK1o;&jR}V=PzuT3;f1UhdHE2RH+4Z zM*;tYH5tP?TLo?q`bMEltXjv&PL=&*lN6sPIqhD2UevV(bc@zAcB$WNb3-Bl=JH-X z0iONv9rRWe)zwO0vQ*VrV&Bgtd?FxXt^KOq(Zz`Qy$Br`i+psO?b42B3)&8`nK_IO zwVYW~Lm9!fH%`@6ox8UPTIpqZKBJGA@&!nlUbD-|kh-Z7Rj2x>l0QF1tUX^O{!T_I zdDymHTK!|K_9)}1K|37*&WMn`W`1Y$b8}F)W~_Q_CJmc=vCU8`%X**jh87+3CBbjq|Df*LPThC! zKO8h)`MlFCX-H@&LQg4jSzq8UQKw|EnA3(V-LyLtT-r+K zW{-}HTF};c+yfAYdA)BAs3^obbSW^qTlI|?_+Q{Tj#63kR@D;Cs~7)9QJZGJJDJEj zgx#pAX4XvjY0c%Y)y;v9yVtlQL{+=IsZCjgYO*hbqTx-}M?PF*RnTbcM0$su#mO6k zKB%~>OO$Ig_m{btLgVXFSkK*$rT1CRy= zA2CPrk)A9vm6eqkc3h8IY#LMnlLt!4%0J*=METxDkXI(g%Co&33;Z2hy5t(nUEZz9et6FPkimY3_{9ZFa(h_ubcgam%U|6 zdAoO){g>}}jmjD^4!v|lz74G-EfpzSvheYA(RNHBg(2M7?9VoEe@3kRK|=MC2}&ob z9Y)!2*P<$8Mm(siF(J$ij;_Gmq|(v1v6OdI+7EbTpkFUgsS-S|!ljNjw#8XhS@W=C zICIZGi>HOIi2ko)n){`eZP3>N8j z`XQ)I@a5&JPfX!R`DERqj9(w_jX@VJ-_i&6JQ;^2o$=5HS%jEBtwI=WLC zsgLr6#^g*)#+0vVJcPuO@J}3)mpxXtSL;(Yvb(fJRhKicREJ#a?kDawNLNph_&A^T zK+Vn5LS-ukFWUORVrtAMd%ATfLz~Z6WrdZ)`Jbr1^ZL)&$=9{`m=;z3)r(@Fg}N%M z^MNYh38YnYLS4F$&4ah*9Q@^CMR zJNC=2*uQpE3?2gm19pM<-PKLBL4M#M(5NId7FTf>3hpK{xDbQOUA478kR%{I%V7Dc zYWwviV;`wIP0(v3&WF8REme0jyGk~dFOdW4T1q=Zo=M_7;2SL;fmiFAiqPO>8cuPn z^~HKUq!*IG*VZ!4cD%|3m&S1l+1c4?0K&*x!$5>RxCT@td9XS)Rms4}Sm=pa6k_&< z!E30Z8d^KKv^j_B^6RySRVS?!M920GQ4MHz>$q;33BXP=wNZ9)7h1mwBm^f|d;F)!fL1$y5m5ozvn1xs`m*3Th z0IBxsSUE)dukqYyps{bOd)EmTbI$u#RS9{7@w!GT3pihSmK(_Mm#%Ag-Ku=NMGEmW*`e0ehusU@|Y6 
z6Z>tT0sgLFzC9x+g*_{pi6DJKB#>dx{$P8yJx~LGI^s)huO!t(ogq=}sf^!-Jp@!? z$bY@ku_S?2ca%#;4^aklJ80Ltr^F!+CW5T2`viB`#f9+OFSN{^|DbK{#s3T~T7oj0 zXlhJBnvP`w$(HadlN?6Sx{E_$#G(;&-Dlc~A}^X7fgWK2Jr|!B&J*RT5H1t>ZYv&R z#P0<~pntBKPA^_KpZLMoP1NgY#xcw9;T|)A6WwSH9y%|N?Ps5bB=j9pLe^0Zu{mpvo#i`dY|7QFrJ)| zL3}`v5Dqg4y#E_A@qUZ2U+Ax~kg=iLQbJ8y9;YbziJRj$O$`k>UEM6cbJfIkJn>O; z7WST_z$3l*YBNyv)nd>+RfZ~kTT+Sp=}l;EI@a!0fP<+d&vKdLXxYOB5)2OnMay(& z-o6Tb6^Z)UAdRQ&8*(Cc7gWLN{Mm{_$Qq~z+bEM_RQ!jA430H6&N$)D*$v)*tn~CY zFDjc85w$D5c6==Z85d7@i9VlN4YC=wFVIe@o}eG`i2U?iwtQ zf-I2~&^EQ6u7iC4m525@y|0(Bfa}F_lWkEKlc9HhDRz$;E>S}K3z5o-FrTRKNfsuK zI-+c~^~w)k6dcQ$Ger;1C}~8Gm!|o8L7Y>+f7_MBnS|J8ZJo82mdYw9D)tHI&%pyO0E}q}zAS)S#I2-s2|&2uP62M4#CWcdwq@d$*Vzu2tAR~h zB8?XH@D`%h(CDbb5&tvK&Ofu|I9h%>R@oedCGS2EwXQefj@T;MCJ?DIOAmtDWD%E0 z2y8oTG6=@Sav$yhK{@QRwcL7;#z=d6J1S#_Y5>?*u-E;h-lxnuSbyLOxVx&>b6wr+ z+k34D>=z+pW~%5~E4Wcd9tu8Ur$b&nW~7^IcSci0Ol*zTlmC3xnYyW=q-12jURcQ) zldfSvO$6TUWBvl0=HEeh4FCY~cL&r|qmp5r8C|k232upBa}_BK{nY-9tA-}g=;vr( z2BHIxP~jo^Mn*LaWXr~u|DLB*qbwaWvsF?NVT~Bx-7T55LuW>lk8t~L)ak_)6)XF_ zzA{4dn_d^Vyw*%$#JN%hCa^_Rs^?GwV^*ou1Blm^cw0md{Va4WSf8W^LzkJIHCKPq z<6NMwMpxb?r#izUNxT%ADZ-{6()mBIO*yy@WSF-NED_CDh-*zV{rWtT>AfK?MN*qM zZTUC#@X?U7ZvmI)Cs~{?+1Yn5>v7rLMggp1^(%HV;!H}aX=^ir%bT4|&TF9FPqn|dUg3=O`s=l(ppeLh$ap(F z@+CR#)8ch(kfjGKrkUR6Xkx*}@3aSnUQBF__<0++-C>xf6#()P!3|7SRVTW&f1q`8 z@`|y440MZPp^2X@L4etA&%HXrEF8w{stcg(a&TryU5m*MIaxe%fk}$Aj_%#=aL3j?06_B9LVD^dO?JyBWWZ`BPx(Q7l!?W`L#3V4W;4 z>op6QfcDFrZ=xnIllV`53smjb5U{XSVbVE+i_!qPtEiE!R>At)>IDn~(Z2C3xJQJ# z8mPpc2^R&|rDcx}k1dULwW&uotA?hGMP9Fxyp7076X)nIN}X1Eapa}c z;#4AyJ901G-S0tNcel^;*=L_;d-gp4_uv2b_Pv(>E}4ZI;X-Z@YI)N`EXy+Ha97B6 zodalfP0h$*V|juKo$k$fs;zn?unGHGJI~DKSp^PVUC`6hW1WpN*h41ODz?;DAXyZ_ zA$wHiuPlZiHuFANaJh0%2rv?S_Cy=_XLY`rQI5Ky?Z}S3f;JP!Fl~NmqP$K3R@ScV z&nV-}pH>`maI}X7zjNqz^KxCp1Nknb9n3K|%hBK_0S?>RqpVbGwaUe+sw^;|)oOyO!kIz~{ zCduT`*ufv|?-A`;i@J}i$vv+50~lbr1qF){Hu-@5n^jvA2L~rQ?l^ro+3`ravbSNk z|J1ENfG5HMl(uT!(bPABgK9+ivnYf2^8y-m5LLtz0!HxbrWn$>55>Y&S&(wH@|HU1 
z?bD!zqYDf957rqzbB`?eknmEiU?~H-ox5+T0v6&uGugot47x+790sv30eIdWr?z$@ zlkOG~QK^t6J6%rV*>-?dqioyt1j*_bulBD&C*MeFl(#l`kPtqcGX`!Jvs?gqaFYUu zJC(*=t(Yt!CJaX|M08hyfrb6DEG4&<>+9}w)q*5LT($5I;9glnS_OP?KM$vAs`|C- zGlOZ19t`d>#|{ObjAE`cbTlzzb105hC6+a<1A7VamWIUM-xUODqH8U0hMn#as%_XQ zAR&V1^oN$IjEL%iw>umQopz+c5~st)!$xh6Sp7oCH@|T+9@;O>^uV zo>^cLd;%z+Cm$fjo-H7-0V_#L#55H+Q5 z1xZvy)sRd1oX^!z%H$Vc<1NZrqvOTV9{5!n@ltPlkFcT0_xRqaAwW}^f^U2-^hmse zbR2A=2-dMHe+Ggi%fK6JY<$!40LqOLK1s_b~`-x`3E6-QH2>xXNMEFwLS{*C0krIKZwH`Lka<<)F2EidC zPL-D~4JlkxquAF_PnRUd7XJF{udWgpY*i?4+tX^@VevW{V24AW&&8DWvYG}UR9XT7 zNQ<7R^c_610YfWVfi^bvizo7_Xi_azT2}HRbnE`f$pf{DobfGn;T>6^dF%9EJK@|Hyw0mt1Kq6&-*N;qks}lclxFkEbXY(4rql+HnV>a91^&K#I>3cUNCCe?)N z{^;D4ll>7c)3Ot@Um|a{bZcDqX33J;#zszJ(pIATBbc_xnuOJ6v9huCQD%5as& z*ClD8kbh_2j4XKB7i~VFPDL<5i{nOp`f7udTRO!M9G?DWsDmc8#t4PN^oR-nij9GD zb^0?N0kj^P9=kKfeiY*9hHZ5nA4~Yy}JzipK zzonU)cASwdnZN3QE5$3~AQUPL5?LW}JT~#;1r_3n7k&D?)H9nL`*L&Yvw6+fqI;Vn z_U7AMST|d5kd7P=J_XKuVD^`-fbIU|ixWDM6+Hn|Ffc68c|}0lLXiZOsZjY%pU+&& z>{jf>x!pY5>21?0u}zwYiHX@A_RXcc)AEJzV+#6$YEkE4-NMqwkG&w;>ru4WY}sqE z2AnBZ1&wfoy<~d656|~xCKl$(f4a`xr8EFAbFw2MEU)b7LlWEmb66Y)jb%zK?HqHy z7;_hZ7$D-Gb35DL43o|52x-7VJV|NjP0cH6qVKshSLEqlwqD#sB$xGBow&|36J42iZ@GSnbYZYpS#D tKK9Q~&#)R?(LjMyr2Og3+=mA2pTv}D^l@#G5gr9E7nY|}&6=R3zX8*sv`PQ~ diff --git a/vignettes/Python/RDD/trees2.png b/vignettes/Python/RDD/trees2.png deleted file mode 100644 index 90b7bd5f12e4333dce7cc12b5e63e638d64a7804..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24832 zcmeIaWmHyO6fUX=f}j`(s7QlIi_-ZjN=k?V(hAZgT>`!~NF$)MN=ZsfqY?sAFWo5J z(hYa+7ZmO}=iYy3j5E&ozA^m7&5pI#nrqHyKJ%H|Ls4Gx7~wg>0|yQqla{)!bl|`t z#sdd%o)8>{BlTpAgzmaPsD2@pV#<^x zzZe5dV38y5gwV;|ybsyysb8iqn}7pZi1mghqn)qRI2v2LiJ6 zE7L{4T50}lT`KQO#IIjySN?cTB~oEV+mzFN#6>q+F*%`GEP)|NjhCj(V@NB^N|c6J zBkT0+5yO;P^6+`{Vzb)h!OLaKW#Wmcr(aE!H9iVSwX^DIzg1%2-}TaSc>MArSEAKV zU1Bwvlb0E)jTt^m@vZd*8aZ8VKm47F&Ftz^;;?RJr87rQdxYG(cH`(R-6K|s3?+** 
z6!zSkWKz8is{TGIf``wOpAnrjB3Uo}A^gy_?6mA^UUL5$YwX!iwNHr~bQTM9d1x*j z;ieXu>6tAY7HC+$uDwjsAEn>;&9mJWrd&n;uC#%i+yNH&nc%>|m&ONh;irS}=N$Zj z)d_fU03ZH71%IwbYA;#!Ke}Be#f}D8uwz#x3{C!)`T3_G7#>mokErCE6Zq;gh z=dSHtxtoG|mgXGyA6V+@b2yqmL@qfX>?jC7n(N!%r*Sklv#=3#6rsbOAqYPshdJqJ zu&3CXiqPGaQ=}2MwAQEL<+#LgiB6P|hK5Gi`hkI<(sha5%i&)lbVjze4+S|n9UL4u z9IkL!S{rh52?z*qUb@VA`7%44!EWPZVSC?^-NJ@`=O%yeb6wv?&)WE*t+AyA4RYW6 zx|VjfB6M`fgZ}-uGfsU+anorlfiM`AJ^L-fV~EaR#pbdHY*59w+Qi z__-cQLNTBZ+&}VcurJ;qe>PhuSV`f%82or{bEXH;RtVDl!l%BqFO`7bMFHO zac~Jv(4Zc}564c8c11fY%OXGT9z@?nU7WqP^K0y(==V6+q{Ua2<@Zi{toWM;d;QKe zC~>@J0Wao~!uRfS@O_!--uVa6F9-=3j}55`{CmtnoNFF!`_9KXMtVZ#Ty>iD-*Ld1 z&rYT7J0JHP3(XtuH#$TAMvqHCz^Hv_-#@wUpe`C;EU7-S`+D%J6EcN||Gq8!(%_JY*?rrG8 z0yAFC`LIJjq!xUyGTqe^cyXvW6rlR=>!tFbaXUCJ$c(YjLNMr&q z&yJ9s^Nh8|sb%wfQdIKZ+WQ_MW6m(iABW~gdaZ$Es*BqZ%2N&s+N`yY%m^91do_z& zMo%h={aanMClLW7sr$iE(Y-?waLAkL;Nsp=C470Ts5DFdczW-UQ5LM{f4BN?t`N8J zpD`d_3jKeFF=&z9{e16g;aoMVmRm8?sU81YOA8^OlHZsST}{+&-({ZOW8YQ&=ht_< z!0M872M{lgvLqSp>k^C+!zEadN3-X&(-m(mrd9rYcSW!L^ZlaCK<69SxWod%&d0Ts=xs>KGiE)@fT$l1yjMUo<*(drle}mGYB5w@b%c<_M#LDKD^_X8ftK#1!(g| zFHYB$d64KWO?J!%Mr|+*|9lr%bbq70Qk0Bb+wp!;*oR_5f}8IyGs^S-dPcZ#ha4M_ zP}y*?iMPZ1qRpA@t(h+?zh7r&GPtfSRMovXdXD>IPKQC*wTWA2&% zlXefZeQp(7pUmWfVNH79H>J188w$P)|I>}IYc=i}5<-8&#l&jhQ#sz7%&6f|dpCCZ ztIJFg9~Fd$Z{vz5cBknK#89qjIh~0>1<|vv>r39=yqyUtoW|c=CKv{}X*(pMrO>Bk zFdhD_Qyxr_Kd9=W#7gW!W_Ch3DiFYeoZv%mi>1-Vk9wt#51NP6{d~(sUnewE*lu0J zuGgBZHv5C#mpk8MeC7s@@!9@`&`y)2lul>6dXeQaC0TaTv1}vyod6!HQF?uOn5yJO zw%bsUs?UIr5Qn_TVzX38V~j$I`^EsZWd5^>wvsJ7FKV&k`{uW;W=N#oKN$Ss-=S(c z!%?y^XZFEGP|xt+-2h=s&zc%dlq?s`=<{XBl6aT=dCvL3T$;oe|2S2+oRo#pz@6SC zTooPH#mS$Vrtw<~@nZTmp&WUG0j*layP->t4Gej_o z7vKHET__JQ$y3K(#O(o~ zgbxs8%Wt(AVu6^Sc>j$1woAA3L<*N>Z{c}Kp=WUMYw{B^WR?SbJL^@teWcs2P0OnN z{>yS?y>&LnRr3&>fFLPF`RCYyp?jGh{^Y7G3n^ZR{W4!Cqz9Ia>_%lZ^m3oOn6~R; zOZ8I8QOevkm9r98DX=z<^1y?gMH0xQ;B%#3_{vhdp6{vLEC+7$t<y#vA126qbel zJz>V*kMViI?tMl|V0xu^dtPjtgoH`n%hu_4-_}xY=baDD@_6CQV{d+t?A$dQZ)+~3 zYPw8i>zf*+nbenT 
zO_Wr?R-u4hrO=wz_))U5hO^|nhGmgm+PGnd;1#>^<}*U}W*wo*pOy$1gU(q*Fk&rN zi~O$1r&zioX(~ZmqsgM>UaQIU%8C)O?bTHB{&zj69H;X}elIjB_@=o|nVMI|3JA+` z7TG43Y^|BE3tv>ZH{G4rk!XEQIz6?AUGCyo>e~^l?V$;e96fW1hRU%Yy)wuocJ>fH zu~)HckzGQ|pZKBRCVh>mdQZf46(lDpM2Ti6=FAQCf~V~Wk67r82c9B6y{mCNV4+H;iu9)7O9kJ-b3)4S<5IUxeYZ{Jp$zgnURg_ zxX>t>DLq4B_W7!}X{yC6;!dNxr2HCF?yoo^C7OsSW0(e-0SaGjCiCtqfEV(s1ap>#CNYmlj>`s5h`l=(KlM zv^f}J*%E#iZ)cM_;VE70(0$DwntMnNGuM%6Sb5ek{3H<>+Yj~d;_a>F5?@ZY+1GNI zr^99jp$Zw`<2jmFY!%*SG1%mL46)~SXEe-gC0$^Nn@E5bD6MZu$_s>3&JcEVZ;UTWTOsFYSDVs>lBcw$n;aF}Ie?G3Y$2C7GYzScGm##x%?FV`%L zH6hX6+;yxiO{W_cbGDi_?LiGr?&i0rp+sLEB@LIc>;eeJht49{A6r9zZ)EAz)k#P&vNiudxe22?32Vqg{H(oUt&a2pq4obkfUx7ZO7j zO{_M_uoJfsn~K(k)VnCAMSS0dFX7NjHv3&bgxCy>>wSziW}J54h2n5XoJ?q6BAgHc z6U%A5xaa9<8s0<5r+po<LU9t zw7p34#`cFU)4sP}Jayu~=|bjkoKaEgqSN-~@Aduxmut$3*QY5q*A{EqE=%r2Y7Rm+fIYMI+Z*+>v7D~VDr)*clL_lHC1P$%nNj6y z6KT{@o0EnGcLU;)3sCg9&KtA0*6^cw{9=fcvoJDNqwilD7|OaWCh72eP#8xH8tr)m zHYt;j@Z66aR6u^o*TWQL4#o2kXD&aG(sUXMA|+|mg16%iLHUYT-Bu7GZHiaeMx{x5 zojEy1ePm}ZCcG>KR7*Z~IrT7^umu6Q2LMGVAMpo?g<& z%RIM-x59@qOsg_%>;9MZ=>pkiWjSPzpnKUKmYtL=ljG|-O})KRsn`9*D!rDstL!b8 z9-UT2%dNX(wPb4mi_r!oqgEFM5H~sc(bp%;vRL4PLgabFa4QLT5Bd(ZfQi(CR7GD8 z5+*YB$ZrI924j|D#@G z7&0u}?n!zVW=s$4slHfUR&;w~KGWUzroAQUYy?=_5XtClctCTkB(*E|aBWzw;!IAj z^MvC}(K1=*2tL+=6j{-EHX{`z3c|DK-z9TFX%DvLUHi%Hv5v_Y*ZEQ00{}r*W3eF; z`S)7@+WWi4X8-DYG+*nu^6RB+)2(AW)5z*4iYse#uIcO=cO@6QbP3Ex1t9iJ)EwBz za&O(lSulQQG|IRBO-$7@W1L^qTYp}j;Wko?@eA0xDivZL>!-Ve9kR(B7h6;ZVglto zk%(9CzBO;A{6ovWgAubSODww5FJ&lehFG(rHkvAmS-NcoS$wqvtL2<-C&ySL7^NL- za~U2w;cM5xNeyARDw_<-(6t})^;B2ZB}-L@uSjCkIude z;C2`!(pkBdb5t*>0TKsBzbxGqEYoRsvfZ*|Gh^s6#QwURHQWGR!sEyQOt1qeqTn^V z+ZC@AJDpO2-2`c5cMT)#b^-+|IuDt|n7sU??0qisjU4JCbJr`Qy~qH&0%$Lahdn!p z5Bq|=;JMA{KkW0ZEEgbEH7m6X+40B7mg|7%k>UMnY0sG_yao53ey|PMMY~7!NlwUA z^U`(hMUQA*2p;8!H68Zi!heH;S^YnBrbcyg4jb=S%f+NpNN~v@uVFKt&q|WOQKp}uCZCeb_G}JU5NLcs z#yQ-5hfJ6OEhwpW{4-2k0$X5TY-I+I>^sz~1jr*cHDk{-PmBVc|1WLZ8C*gvn7OQ$ z{vHEN)9?vU46{Ge-d>t0t9UpR 
ze?4*!Nj*lf!|eR8C+vMSj?M5-4~Jf-_-fp>zx9!XJQ$*lE-Cj%6d~Qq^VU*FDFwOR z=EM59^~kgSrrr(Q@SvunK`&Zw->YH_@rnYLpfT6756J{Yl2pidCpp?0XOp#U;E9m1 ze)XF#%e^jokReqUBTUf~Q$@{>xLA7T{&P~@sq8k3J0#6r;%LZD_&C1wKN;;SaR-ng zX5Z^Ha@VSy)a7pC_npZ_e}H_rig^u5*Y3ZhBtsHxdLMqe3U3N>BzYBvkRl*!d$$7@ z(;jD)%@bcr?<*V-G1PhQ>9OCXhmLze80JGrl;KG)m7Szgo1_1xblt(NugBd^KtS=qbNU0 z`ue=2a=utF@-w}Xs!KScL1||`X<`zOVi zyuju4XGKS+nqN#4Avm{3?8nI+H-zeeot__<^w|t2DJ4gp427_+lb2I%muI-+8EUUe zy)WD4N&yCFfIG=4ma#mAw#`#m&(t21c#9()cr?;rH!+jrytkMVM}3WQ&*3z5!=AVC zjcVO9`!Y6I!GM%%p4^VYHhf4ID2EV6T~K^D)V zcy;*8uNM@~#xu6BOTXb!HL_zLE+QX{gRCn?IIfrY(-q~CXev(p2^PrC3#TUEaF zT|o7*JmTGV(||DpYO+gMP1j*zyQbr%UjM&#%P}$MU{pJ5Gzjp zeNFCb_kb-%xa%*#?hNFXY?{-DM$6+IJNYm!hIW$WFU+!Kdx+472$yyjoaeWUQc8-B zu5|!ay?k-b*LUIpcX}zXn&j(KxjKb_BrPJ_%LWq0dH_`Aj|}|{X6R7Dn4nyPy703j z;WqLJg&b$c9F9`M{mIN+BV~lYyJXQf`pZZZykJiQ?Md%=BEbtthM==1l)EDey@8@J z4)({?eSk51U!bKuzkqGp6*%5iHkA-BxRHY_AG>85OQtnbb(j9>xpDJ|J@2P zvj64^aX|mOjRC!t=VQ`v72v6vVr{Jkd*(1@fq`jWn|h}A+ofUBKkD_YBmA%&Y}(QF zPlC;7OO_~hJpk5Vn^j7OG_n_!1^0Ql2oQHf99ZSqcPJaMrO8B+?Y>I|PXgRLIA^-= zkO?cm%~sX1eY?Y!4&WxvVB5Y!%_;ylQ_?W|;pTOKo47yH_8m${0W7DX6T9cJX&O8L zZXWT|*>~t~FxV#}lMTESO5>-}XgK=wCCJ$wPqRQd=_xBt{?vd5uj z>AzX^a&ASR6xX6?vDI;PC`2#Hb;%>UO_>4vEjY= zz(?v$qb18`2n=mMdtm4hgbSc^vRh+QplNuGH(VD%0VR%~&IqM%7++fts873FPYdB4 z&>2T!WV0}>X*wm7>o6ZwFDdQs+P5-v{Sw1<&I1o!^ysl&nhm-Y6%Z-lG&gvL&q8wq z-)W~7d{L3+xqmB`>W~bb;J2S_$1znrjkX5W$`BQGO47&D$&SoScU**h$x%;(vP@1T zqMm`rIuqQC5adzOvTH#?Xy!O3R%^N2MnNP|1t0wa7=dFSRjtnAA_4ubO)U?-*0;s5 zzKshA5?=WsDH_0olv2zJl9P^_w;V<$X0!+=KEhs|H(g}$4;X>3U8`DrO6FK!Ja=xH zL^L1Kf>5#kL@i}A9}e|}KmAXSSA0J%7XI8fYr|6A)pTuPtmDCQ*7=V^ zMRZbXKFf=vXHWSG&q*N+a<1$7pJO}Iyi69f zFMg`vpJ3cGcnsmI-dM_W$99XZ$o0oyKb;#$4nb}Pa|q|kz&?LtZ^Z5OZ(E>vV569L zWoqw{;}?+)-1)xo(%;Q=5T^w+VToLI&U=cf?T*z*KrIVU}i-gB81*lOit(xAVjB;As~=uMJTP|} z;G06dctmDvq>d)ndBincvc2_I$ibrZC8DRIavJpi0xmo(N8ub6%d7%o*|7J)0*v!G zs+v|e01#XdU9D260I?1^WQZC_C>Nc7S6>ENEd+fC$k-8JAC?IZ`}r|`3VGNyG@>Lj zs0}5xES#0N87VBl+kWpU|MZsuUEm|Ez8ztD&=>YuBlIS+DMe0<_C3|3l)-M$eD+Ju 
z3!@0(wUiA!isaf_uKRkfC^cWyveRC2nIx31DqW?gP(#JPugiI zL(vO2-=08JC9Ne{ovl!-3a({*cUfO%W71IUwSI^eZw^S?5H`>(x5wU}V+Jel*;SC` z_CkP*p2kT*Gbjr`>@Q_|6muXT2WUtp1go*dXg-=Pw?f1=z`e=Y*V}F&Rpu5z%86H6 zongk%m?LbUMuPg4Z5)F+NDE#;X8k5L;v6>f%_ap^2T&Ubnnha>5HR$V&fNjSSfGOt zOnxjb^=|-%C6jvrj@-G~k3hW=(AtqsWKUxQFnXNe?QNxq^w!%ra?F0y=Ci+F zb5?-vg%pSy&)I=WK7FzAmQfHNly9ugBQmeW#0=}jn5xw(Cn!CoYVaFj`;u6|(Zf|}gBlnPrYOI0G}JP=dONLvN*FG2m^>orGy z!YNnt>rC5y+w1P>7#Y@bAf_~O$Elr1-?zgY>w>neM&&}1MmY;G2q;RhV?o^{PgD>t zCZZ4|Ck<3~Kmu;rAOtQE{a7yD4dT1ZFR?)^>O;}vYr}tuOjlLOS zSCIwC>x^JZh!2x>TQ2DH_NSMu05BYsq>T@CqUcEoA{~-a>jd1NP$w;YX7VRe+kvDC zyF;Us*|hP_R{X0?od6S3-1;(8ygo#O1LJ@-G!WjlRh6VzVDJ>p%UlF%Iwr&V} z5loP``{DO|Zbq%t0E-xha#Gc@{;y955S|Fq0kI0Nv$sRmD3q&ORCOrDfOVwTdcp&U zZe^{lgsM%`Ms8!u(mly3Fg1TXksq=RdRHBgEB$^&@AIRgAG9K=PCd1Et`g!?ueVob zWHLH+ZUWsrB*0dXHwZsOOx(rJ$5}vZ24@(}859lC62$`YT5^_a7@l=<&4*VZxYHus z^`v~iok4;rBJINddSbp(uPuAhB>%~Ie?pv!PnfXYa4!Bu8VEI;mxOgZz zj9cW4$U`|@bn%lyHeL?5gj2nnZT5`KNS(Ls>Tl2;o0*BfY{coZ->6*l+eO-!3+ZR&kId|(Od7~dho;V zEeN>cqyEHQIsE;_v6@~-_B$xfP1>Rk5zv-)IxoY`4WoRvVTCQaaz~#60n36>U7rH= z+H0gN3&~}(+wx0M`GYZ=@te%ekkhgIUsLe!f`WYzyJ~mw&#Z5qpaAS}Q(bZd$IGsD zMh?xX)d~jA2P?+I9nn$%p7LYxdxj!j-i`->{44!WyMhnNzu$aLuD$X&j1mb0D&SMp zB;^Z^G)p0`vtVqsW>Qic%EMYv$e{Hk$WZqE1Mtz+Hbza9fhsjJhR=~mlanp-{7DwB znV^e*uz2cF?47aNB`8AosW{}{G3qZx6HS?}>(}8Mw|wIE5M_Vd;3m5%zfRs$yp)|5 zWa0mmJ8BRJV#geYho&w~G0Q&FssY0XT?Z)oR#5IuzmL5>ylC6!AGN(P(sA}}t96%q zl(3Wa`G=(k&Cv`++L%7c{BT4@i|z%UVIkPR`8W<90ry(1)jG!=<~IuyZJe!Yh7mcL zlE%fQ3T|_O?J#dw0&FiJ6VWmc!!)#u&19&gc7Mbh68j@;Ar_Aalc@XJVoYum(R$r* z2$>r>O`Ho7*XNx}Atf7(M14e4W~=vulvO;q`Ai$x@d6N~NMq0IO@WA$d7DdJA2(U_ znPXzY^m2+t$y`3`Ad$}lm+eUR^{26)a81}7g7_-4EyPV%ncp;Sk#hb93SQ8j3`o-w8mKR-lXeQwr15Cuswo!P83m; z2g+LAx^RIGH81r=rU)+C<#Fi{k8BHL{d|$7c6uaxon%v=aO;D>+wc>3X72e>VST_u*>j9~S|&>3J!-~ouViH?Ifv(DF<6J% z_z$->TgwJcXpE#XtY;-QeX$xaQPaO291$EN?@n$k+vh8~Hi}3=3LAzrZ#b}N$>$lN zZrZ4C!SjO5tcZls45ksmYAyU3Uxj&*mJ@fk7!ez zgNKV^@~E=AF_&v!_eKq7Ms1prX(PU-Vr6!~RILapb}I6Ly<0T1&@~9-+$a=~4+)Q< 
z_+w`%nz;*&9=flsQrxx-*CPiG3;knI9o9bs%0J_R(qhbv*i+} zRqhK!7JegS9pq2sdKr_W?fC1te{ijQ4P$(4l4()w8kxv#`--!MWdP0)8F_Z9_9|?* zgfPga_)JJoCIq8GqXKXwRC8?wZstF`$nF|?!5%1|4lR-9d1o5G;1eJeDi{Fs)3;Z- zpv92|@Mj^qY??B3xRJXlSi6mS^@pT2pMVx&M}&DJ=;@MC7?xi`2XTsS0IkMO|A`tm zRFkHt1F8q}bUifFgMkWl!Qaj2ICyT%Hwg#dP<>*b&j#69%cm>a>BScrn|rHAdfdlq*J zDqq7{U3nZBEYnU=BYxzxLGbi*T>=5fO|1!pLFkjdy|rHHzS-tp4u#0zpwCc7s3b30 zDK{hi43e4Qi!HRs&B1;z0A>WWVzg?S=4U_y%G9TOJx6g?~7gluo3TP>H`dS|ykd1YQMjX5?ZkUyuI=jl9=LlzZ{= zp-}{;54jQiymO9(qK_;c#FJJ=gPW*A-5~N9b zf=nhE8B%TB`14ZC3u=`@yr3OIbUz4?0=|g^1kfAb6qZPk0WLTnOtM^Dqiwy4Jn%LR z`@!&H1phdKoLUbeWjb_#gk+|xW7{@(5C*t9=^>nYV)W)c&!=H+DqWqbNIoc;h}rk$dFr#phuEp7$FFkX!Ct*a8np6k2lLBWj~h z18ie{ah3k>DYyi8RKYr@+b8xq8qrDs@ng>%_TGIwoU5n>d;7mZ{WnuM|35aBPk+gM zYhzltXGczrY#$k@lD>KLnA`F`8Z>nrFNi76K}u?H-l5xS7TR}MkQDrR)F9Jen?=W+ z8tuE2($Zc{gaigItKVhVGClww|GH33_svBs*eaDA1(TNgB-VS(07cz`= z$kr<%cK>dQ+OkEezlhGe9dTv?->~%r?Bn#wN1NuV=AUHnDXkN*vHWQeKVA-9TYA-R zn5{ssQbxpCcmhz0p{8x)4W#ICBTCyds!8(r4qSm%8Lz|EW9+U#o2l6R;fP(Hb zcbMg&&}0|$_FDo92B6Rr8UOf1QTo1@iD!Xw+bN`9s)nFbav3fhlr zJ7)~c9TX}OU+M7yH9_Zj3#kQs3qQsAB#z`QkEsfoXeXbr+(WVPgpYLWXU+cc*G5pp zjmfJ>noDu*N8{_(u;+S#N}drB%pB1eJn1l0tl7NiBJb9pj1;F{OftR!yXIlEURTcQ-;i)m-Fh7-7e?m3IiaJ6kV8m`3X#}+oSQo@ z473wJNP+c`)-n)bC&}L6mhGcx$>pG3>X%+51h!*Q9qrzy@i*U{8mRD|nEO@RyezShQsXBO^_Iyj_Hknmdr!$I z=p3AoECeYj;g2c2$O#N)A}$r2E+jKNGX-_z!m$=-^&yA=W$ z5~2G7E=U(LxanJGZ=4r~GML5l0E2NC`ZblZx3(%04&Ph3D$h6Be{ZgreRT;~5*QK7Ob^rQD}glr!_^<#fPD6_gI$ zClT=ld*T}EUG}sK{d(|tF;qF}i+pmRr|YeNjlrWi=+b{`rOgnDf9K`Ex9{ zL(Nr4Aj7bv7yM7AY_>c=N6_rM`4@RmUb=^>C!G4-$b5POI>>6*BGxv1w^w}CE+w|b z>Sa&D@a!QqF8>rCd2Y;wFhuga)pdlgn}t?A2df978^+#*8e?8RVSP^`t!#mE8*ac= z{EpgQjmkW+{fnJrI=5;8TvfFwn^cI9#pkbw%Sn5$N&~A=E6Oe99))lkyf2KYT0s3W zH40ihe93mePUs4xou2v-JB^=gKlBc~0({Gh2`gH-Fo!hw*;mjMy-Qwf1qmY3i1iyO zs*(|yXz7@)8eeu*pB;yps=ao%GwQ+@2SZV5Q#GfOw!|6lNe%TnfjmT&Wo}py48+T~ z;rrYyW<{_8JHqC<^eW1r*(VCme?ejs;gt|o;EZf^O=awH0!)V|WR>htfwp{5={lj& z=ZQ2GXmQZjlfb5Fm7*Pl#eba!+pe+1eY+iszIvdme=V}yeMQ0F2N7`B0+WNEGU%zjdEWpQcBX{aN1 
zTdC363P{Os`I=^uV+umLbl}nS?OOk|2D~89`oE9w8OO|y{nTG8MfxG~n;5OKdodmF zZ6z+G2nS!Y?Hq?9)v^MlUJLTD*&KY5v=^<>K;7|T^e-a(Vu*9-Fti#jbpaa`tW{^- za1}qhuV<)o%&eRVT0A(!N?tL=@o6x7Wm|wg-F``W<(*+znHPH+k}a(>e%)wIg4z37uyhjS$lJy1*yHC;nKa{HPiI_ zs4VMNav?`^`ji@^ zk~~wO|7*%2S44hO6pH?xIMV73belzSnbW4}>`yk+8t5jpz!Nuwf{HQ+n8(T1^n zJSkB=Zv7#6(^8h!2oFPg_xDTkpx(P8AoU}XE0nhkZ_<}A7Oeta_fGqe1a)t~kKTUL zj%A5u_6Q!+ZFL#LGZ+!W&Y6jy53EY+NpkAMwl|KSb^t2a(1wm|Sz_Z5e%7RvUG8xW zq?{0={Aql4Sokc`hHKSn9IqduVAou{w2m~lNQLCF=f|P~$f%LD{#6WVUj^|deQws2ET!b)wQ;*4sgUFtg~n}gUgba+e_w%?!zeA*sBd@GzvWPAq^$M+ zD2|oxL-N(Zx5Y{*X5F(r5-(B~9Q=cM|mq8=0-2gX%(KqWzvcSd;k`AAI$43PbR4U{)VSEfAmj z(Iuo`$!>y*d8%e~e85)vzTm(@(Tpo+*!&!D6%`R_q5W4PHoSzw# zJ>xy9AvnPeEwgOx7rrC;fe=x!$^eV5>78~NR-M@Oc6cM)wX8K|LcGPd4tB7=Z{&Ni zMQ}-noQ=C83EoKLHQ6x{q>Cv=dTEMjqb;AZ8j3wA2R!&JixyXXS|KVbn2*dfD|r(F zm)q&I?3+uGjn?zud~qevwCR3qf zvLnVU^+8aScSu?=sX4;jDiufC;j~Bye9i*WYQPJIw~1mh>D^78n#kDuLM-yG>(v3} z@-vukqR3UYW_t_Tl79qy%E^YS7*Eu?D#(h*Uoj{FAD$Uz87FXLO#7047lzTZM}%G_ zM|99wL1g&Vka36}vSwM2YjQowl(o|!&4^d%f5Lt)w+XI?|)oOAk6 zFCMG=nCz@nO?DS^bPW4ciDAj+@078o%&3v9pYe{#Lzr0Zs-n%D-||A)+l_kva;@_? 
zuIjCdX+2uu*4#g6amShe=+-PQ)a3jOGRk|0x@nOY^>swYiE$0O#S1akhcdb}g1dU& z4~^JfU@&h|pBDP8Tt|`0Zj>kXRaus87M(hk|0Aj|h~4l$Sr3`3ylRbeiT=B8Q}s2B zn?_dhLP2V?+C+wu5Sh7#YG;e`Yr2}m?D7x9B4x zD&x!6#M(q9RG7B6`FiPL(1XxKV*O1Z*@57v<-7hahtkK68+}{s+C>9C?a4!t$&7s+S zwVl(EUdksc<0x_n8E;6jJUswi)=tOnsMEj-6g5~^Fw`D5?lF*yJYVBW(YB^n*k|qQ zb2TX$vy_Rr&7DlD9J1qbHI`4(c$kg=KK1GPOUXlX$*oh1!8Ip?yB^KAQV+!TZHua` zY>afdE&bDqMDv3e;Fo3ONkY{K`*2&v^lkTSu1*<7c{^xKuXV={S&SLWz8~sQ)$vBt zOK_VW&;HwuqiAw)nVEU*7;cuU6Ha z_Bz|4uQ6tbB*!~BojF|7`GqC>2ww0RL2iudM62beSlvGfw^GFZHMP(v(gWq%Mxc_8 zOGYXs>(dFBLYv*Rk($gVv>!~%X<^6#+cQ8H$kTph4LNv?vEDBQ4%e3pg@sBUx2cO3 z*hc-851d{s6_n`xu}0M7oA9+(@^z zF@7jtIBVp}v_&DM-qyQ%-bkZKZPyaeU**u3Luboi3I)&$DIOW{}@5lw@F_(`F zZ9_B8n73@?RJcu@2Z9@%i0y9j;ra-UCm@MDk|Jw~vdp}NBt2yos*(JdcV{)6aJ>wj zDz3=_Gx7Js2uA0ysug0;CumMZ0L_kYL6BT4w&5YtSje-4bfKeJOVkLeH)C@)U1GCW zqE>GTgTyo)zS)!0H)Zt{#`Y8+1uZ83iY#w5?!i}8W+7b*Dexz??8!22(p~xUtD_?z zRjCjf&$L_unW3RXQc~zGXh{eV%4PPY*amH|hDF}w?P>VP2hZA4 zO6>?S^8JWz7!#LaUHD38Td~Tyr@a`z)2EQHPdLuk@?pE#k_&8t8GOY<#jSxvsd&v< zdgP*>m`qR3q6MWThdgK-DuYka{h_U&?nv`8tlu~<_ZYQ_XEN^- zyS1rEkCq_{2kEO72z@KXHk-uCp;k47)jI>R#=|8Y)voQfI+PACBbon*Rn9(_l*#YO z4=9pTJ;gun1K%y-Ct^fUQ7%VMbyMh_N-;>1%HU;4ks|)w>0fWl31U`h3MMmTfej$) z=rbam8?(#_4x3HJwk;+gl>{PX{Zl~NK&E)2lFG#_9JtPo_^vC4bw7ux%|I8xV+=YA z&68tnKarZb>v#d^N7`Z{6Um$ry$-^`>TLWbzw%_UO-D={y1YYQ1lZa$cn_SCWjhH? 
z(NPFpqdb%*UD6buVmmwr1!@TuH^JBbTk@vaXY19gSO74oBc%|e`%w`eElw=Jumptc zT`1yYbYdtvkrtruq0sd%R-xd&*7V$Md7nyP;M)4jqh|vm%*UGI>#wcfra232#(BW# z2%{$H9GP+keR=`;2ucNTI$eY}lkUfI$t79ZWo;sPVee#I-qopG+X}r#F1e8T&5aQ; zUueS1+`NsYaFC|KBtE%Bg}@&3aspweGnNCR3?7|)Smu~Gthzk zEX5rtUVSR1nb4gfm9lhUdimo zcK0)l%SSs%@z|G~+!|fcVHWw!1!ks5DOt#IZmOXXShzjRiTaVlbS9vPqK;8$+o!_u4JZy8yTW!#gLtlp4Gkk2p zl$GsyfYm^S{_-*D8k{y0*^twl%kJ9_h^C+;_QrAHKd6+%#5;M$T(1yT@jEKun`;X1 zfEugX&?y>4R&Yrw8P$xizDPY!!s>AmNwEm7dl4&pq3H`T1#hweVMV4v7U2a{v;=rq z?|Y=O@Vyk{-%g^)mba65I8b}LhOx`tPp5!du z+o1#g)8<@I5Z7f0^52+W&w7a>MsuTiwQ?g3Dd1C0=dH?4_K0Q$N6pz6a1~8HzQGw$oq2s` zxnz6OA}Qv|m9ZL?!qVugPm%VCJR;smtjrh{2wFJ|!=c!BxF(E#q7e#^WH^VQ^2_R4 zT_6O^oRt-!MMdku!t0T{>~ByB1gFDf)bh75gTS(MNF>r<7yU!A;N_)ZACY94{Kd5h zFK`#C^6ll?f3pTagX0X)ICDguXWxakK!w^)>T>?Yib$Oo`Kk+U%DH_P3X(w+j(z&x z_X!T51pztRJyAKf??Or-ASnyl_kC*?6#>Xo@M;oLKHhyO+6PEIAFav1oqF(I(7$(0 zIwD$me@|Q`9IE74!paPHu2p1$xsh*3{FhI`h5t?0&a9&T&zPcta@%eYeqcMoS zKiPKME)w(xOn*N%+qW(lCke)?1;#3rN4U?*jgWHV(z(CA%g9?PI>35Q53(St<6Y~C zE&{{X{{Al!hQs4fNNtYLe7?`5vq1CFY3hg-5${gVZr`up-y1<5wFCy{a?E$%7!nF# zr#V=Y?z6cDq%Sw^7uP@Qh1~&nAD2(P_Sc@_SDj$@Q#``^JHe*m?mbn?`@0KV;B|^# zRPGz-|8gE^-ek4?dAo0*`EYkTCRMD~W7i2x;jQIvf5lFNkvtm6B_V`na|ChM0P-0i zD5-EpI*)%s$PR4fYivLdy@4haSc2|uEJAJ5a_~k3 zxf0&zNP#eW2;XD-sJg5(g1D7vLi3Nn(h!(3>D?@fOxfMSQusur9@MQnNPIxSg%ocPO!r!>)Dr8w zP*pHQ=8nfk#4xk@tPSU+FjmOt3m}%-3QY5{(NE_C*|5qDU z2K1omQ=Xkdw7=y4-|beEPEh>H(Q0jZ{z3RTcILTnM)%FKYkW6R#V$QCmtXOzz2IYv z{6y5ot9@B(tsuP4teUngUNx8mJTAi0_Z7Ra(Nv%tY7kbiw=|c&pNF{F8C_#upDhXhS}AFMUJ)dg`@%PyQcf8xs-$ diff --git a/vignettes/Python/RDD/trees3.png b/vignettes/Python/RDD/trees3.png deleted file mode 100644 index ededa55f50ec7b977f00a93ae447b6388b585706..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25988 zcmb5W1yq$!_XY|_B&87)q#FdJq(i#9Q<3g&5CrL#knRR)kVZlY>F$sY=@z_m@b`T` z{r-2|wXO@~aLzmLJF{nI@BKXc8G;q$#8Hv)kzrt9P$eZqlwe@)QNX~!iXy^;cL@1t zN5C&H&4h&&B!z`Z6zpw`%`A;zU}&PPq6DNnUSfPnACoF{Rwx|#Oqz^XGdAVzQ+xXk 
zEtBqcdZb#`pRFYa(+=awPnfqOpNC)kgx7>4bbR=tfCP=Ov^Yvsx~Wcew#enl`S<0t zb3U|#cQ{`j2>4W*Y1hB2{?t-I3y*(5`j$kDl*asBt|6B+`NfasxkcYH9NOdKOG?HarY66iN!l1PUnPG@p}n2CwNR+wqg2X;{TcM2DM#0lEz9QI83=wvrNrP%7cG#ePQzRhyVTx9pn08LX0lZB$!jG|nE<7)A} zsT<1_dqXTJ&KHb z|9K573cc`!vaqBi_^JHb-pI(>!PLg_;oyBwaH|P36%9uXSsCuvHdc(U3~daI7+tMw zp-o_TUAe(aD&HjCo zwZlKd0s~}%e!|4e_>Ael_XbUQq3?1ln7JBRYKoXy0rP-o@Uyb8^WI(me?Iy5iGQ@z z__rnJGqyil{_)BGwp4X6vKO|o0*`d$|958oY5eDh|1{)ff{y$TOZ+zT-Mhff{K&ja z|D7{_AF$t)aX>VHB-JOL7E#>70 z9o5gv_=!LK*hb#>((j*W`l!-Jd0z+y7US`s-am z7&N3A7(|~xzkJN#mZClc5Iyw!^IaIwsTTx)j|$^iIOzZAP`1JUjDVBE^xpq;DG$>} z*Z|*M<-y-0fC+@N`n#ioZET?bGQyD|FaG{uDJlc?ua3eXeh`B}3pU$SWd3^upi|X< z+YScy|Cdo`c_?bk$5~K67Ca2?l~Jkhu%ag0VW`|)o&NbDM{hlC&}F~5dy{(02rxYPzKU3l7#!ZfrwoV~T$^Byyro|-yP z&wY#1eIwO{Ov$@kh`dz0yn+05_bc(69FHIMfqGu2svSjBbvDGZNo>LE-x6PIB$DA& zyPull^Ef{()u<4Hvm?#+L&`u8_9j+RQ4xvcJwoI=XuDTy?zSl$#=7zI@_6fP)+XN% z0UckxAcOKVDf4IBIc9%+J&!$hDG7;;NdBuJ@2jnRg(`8jn-9#dH;)Ge@~=*J5K6@( z7g9`qUK~2!UT%!;l(iC7|CHbj%dw7*QkPELNb_LK&C64gl#~qbshKHp+L`G5C6~&r z`Z`ffK10=HB3r-J+i`P^y1dYRry z4|VzwWr7Zt{jG$tMx8a`>5utjb9`>EUpMDwv$a;F!ACdei^`*UvW0Jc-JUG|y5-1e z5~G+XN+wFiCrw{qX}Me{z<>5qFnn5gI8?my0-<4(G}d|BzTOnH8LKjpA07|*`4;2i zbW(q-3s3)nrWVf$Y35HpC78tdK#>)j(yZjfbgdKQno{lx^yzws{GaV{`kXv`V6zt7 zKbT!#Suv2!eYe6#fB%ShoKd%drY`=sXIs{GYGr~L)K z*rx$GsH$}eR=om-$H&L0(d@B8u5iEtvCjJovZ>roMa;!&%^K-8tv71UNu``p?e3;) zgi<{76|WI$EG8qcu(8RX8&!yrlln3sn(he;nWd+_+#L!uz42Hrtkd)gI&x94`1PpB znhXr1TDAC`^k=eQUkSw8BXhH~IX=5N2)v9|__(9j>>Swp8XJv10Lg7*IMN9@#qOZBgO$u#^ z+m%P~o@$Wr@Vgk-3T%EIBGs$Cy2S#$W_KN@P9bY+#(A&vc{Zb7oO(HDYd7SSXy3&n$GsWVTI^rT z5#iyv;RGD`n(sbVy51-zKesX+D^NVzMCG9ngP*uQ>SM~+wrN&PqaQ${YTW_>i2-gX zaH<%6dHeq>FWfy3y_BzMKX<3A)`}*}^qGErQoZ); zb2x0h{=rih#H(Iu_x;OQjwH6roYSPPxTdD&5062VA0=-t*YLK7GW|>(J3k5!hcEgN z3V6>kgqF!Ber-NqknT@ncgSBxaTc+zU&D1l=pv_lg4fd)(G4bDyuQ?)&$24nre(UU z*LH?(XE3!K>r=!=`WwA^_Ox&*pBn;WO>Pdo z-Q7{5ORaw97N_l(+7@g46e+dlUHpd2?e8iH-S+5t-WnMh6)}mQo{`Sh+2kJ&@I*3x 
zT7tNE^nClKl2mrgpDQ;$@uJeS^W^$C{nu{Hf^(ow%Skp(4(~{$O!9Jys|1^T%tw!Yf;s6h$cKl;(Rf0tbxY*-HrSb%Y_E4S zCNHQ#G)gQ>SG&%7{Dk90u{GH#sn%?f>NSQLy@zh=Vf7j5NT}|lvOM3KHB(0bs)HAZ zVIPyOWL@Bsbzk%@6Ox=ojYUjwu9R_a47<$+lGCLm&`App+_(f1CUysb$eI-0%(@#t4Q?3rb4gb0m z5ywwUMJ48}m%ymI9l(fTe!tJMGZo3u#YwoH0^1&-+j9PxxoHLX+ZKFvkcw44&1T>j?^FkGc zN_DH<4LSEWdP{%EGq_iUQ{-!Y6t=h5<#m#l9?HT?=v%xuNMhx-lY)j~NR4x=dc4?1liZ||qUvsz3%b>pDZdz!~CB4y2R zYEiKj_Ly~b@5Ysakd*DRs}LSkOgQ70!|;}) z!qV5u2NbV)n0@6J30?Sx;Dx9>L_{_{P%|UbSiL|?PTS}rJF^e61CzBxtYc(eGm)(Z zDii-M33>U1!Nn!Q!mO<@5iH-zFBy5ycjfR=KCOf@Qny=i2rn2QN=o|w{%cyyk2BN+i<#=!H1p- z!}G_3N4|eU5FbG}q&CuE5*-}WzhMMM3e1z3CC>w#ztIiJTL5K5Fc?Gr2TCEz!JtXn z^1C8Kk`NDqixL09Dj9(=R~UR_QD5aiv#Fk>q5FT`4h}#lk!OA{bpAp+<=6m&Iag9S z!u|z0J|ix@6#4uB7u8Ob&$L^no}Qkb_WASY`0Izo9Wu=~{z%M1hSS0-4}0!Y_?#6E znr7LvDqEkB4wze9RoA#MhDA-ywhS>!!F{tA%gP}t`hD{t64(<`O#jfVcO;-Ih^#Lp z{_YAj=*n3&eKq8-u8;!ghS@HIf%VsDb|nFJ;@d+m&60r$)DnHnS#%HDiU3oP*ns@C z&_i8dgU$8;ng79YVi*9&!HLGg{yhO1p#XI0PDLU8^$fu_M1bYOGJ;tCgreZJBW9cA zUPJjb&-=vu@c`ionR;DDJi8lXIM!1%b^T}~baj{2I)2z{d9UYKamk7H+30p^+N1fU zwL0RFM7h<~Y*xVxvB=~UxGeA@m@-?wd#7jKP7Gv!W{YAg4}Gnn1}yi`$C zTu$s9nS4ZAQSJ5(bKqN@$g=6vqPgYGZ*HQ{mDf>2aHO3MGn3Cx5Zo)PQh%yh$eqyb z9{SXy=6-u?Z-0MoSlENsw|VmEa_IaQ8Tw5R$)5=sh!zV{{WTFof-k>>JuWJ|y*+O& z{-Vu{g%}Fq7baSAN@&8aTC6tAP&bD~QCCCMr78VH;@;nrj1dH5 zv%%wbWH?jrK)trQiXq>>a?nd(*8K~zR%OpCCQ)KZ*bViFqZH?TBqqK(RG zea?z^cP-rv4fkeh+MlXT77^xIno+)U{A+Du7(qluMcItMJ(7}^&c3-m$F|w9sPm!K zcCMm1`O{A2_kBUYRJ=ah+geYy$(yb+nH_ojN_cv(t5WpX59`mH%2BqVOzH!)u2pG> zWHsNQN<>(}iUKz0bi)vXIh<{Q>XO)WPxk}GYs zANK5NSd{;`VbtTl_C^?Kn@K3|$&l~~!0O7z#(B)|kQI=hV8MpLVgAW{>`IdiO_QCC zPieEtOdqZL^8B^Zg79G*(-^Rj1;HbPP|W^pDA+~iz!Bkm#EhebCP}~jLfE)> z&%QorhlBh>q06VMkf$D>8pvBukutottD+0$zQq!F1y#-9TkN>ic7ID`wQ5W)Q|mM6 zjaO5WK!ChJv=tBd5B=nh2%NjH>>RHg7kC89t6nvl>L}d5M(BuPJ2Op|Nz#re`F}nW zqFCyz5*2#kj~Sz2O3LsTf8#IMR%jCH@6$blK(3~$I()s~Dh~ioR-SC~2uOzt!Q^Fb z23C_`JcM}m{_uglm!*V92PSw(48|+yxtxR%KbVU3SrRNP&Kt{Ug%4WJy=q2Al=E&I zsg>kMW6JW1Vq);Rj>DoS04To8A|82r7iBo3fcHnhC#3W9d55@U89fyCN;7a3umSmL 
zd>*|vso2=qTQe4AxnL)z{`mAAjoa(W(Nv3!{jTpm*}xulvGtix5j8?!(i!XzyYNN< zq=llg_?Z5pc8v+r=}&gqyh8RD2d_99OG=*7J$;%31x4R&|Km$r07kpO`pk#uBbbA9 zFip1WQ=(lLakjrGzqPd${c+^^nzpmz^~K@$LWhlku@X&Ij@|dv^1YL%+v5&jmm&Wc z-xDxHhm>zUFkpNou$HPs85?(5+)u~Vx4=#fGgPg8s$KAYrmTJ)ABr|N!Dz-k>1q?H z%R*)hM2rBbwOorGeTu7jX1f>gbC?>#ocJw>Phwg2*s9iEYDEK-l?!T7sdyj3l!Uk3 zURkUZYMGgtjoaGgCi)oVTV9$U9DXGp1#Hq5xc$U#*hRcA=n6Enm|zJKugEkCTVMoH zz^++hetf+(l4~p0?qfAgtDVM8$m<$KiQjR2rUt~+*5#|*}G6zlp0)!L;jMB9#*QhUE(b(7+kt9*=2OL zs9tW-YUk$Wo?Lv797SPjT5=WxpRBS|G_Z?_0q+RQKZXnoC)lP1CQ_r1D2nPCSc$&V z8e2%|3?oIV^dKZ9@LC`Y1ldhg?8Y(>-G2-iJt*IDvX-FVi+}t=eK&j{nPZR>Li0P2 zwmCy#`#_EYBAg6xMHAHWX)+*9cWR~JP$fxLwbl$YygJ!pdvoh~nNEESAe(}@d65au z$AA1lTPRr4Oiw;RH;1IpAntNUJ&)5iMLGi(p2>9j05^NJ*z>+JQY5_hkB?D0;-+_h zX0d=Ygvgrs*-IbVJXYm>+lSDV;3WR9XHKN$xWcfgWlv$q8f0|0|J*++jOzAhWV4iP z16C{5t~20Sq|ulj37yH#5GaCy6-GSSUtEl`S3vx|V~)X&sfg&es}16+Z9@#^no}(9 zM?6pvfc)bOHc5T>MbO#5A1_&Aeor)Kz(fnab3_07=a)Q?G?P%kBp{fCet7vxW|{?- z`!A0DSvt%BF7qP%Q`je17E`cFf6~Vy(f@4IK9kTee0I7J_G~(Rkga$IFA>2xVzkjV za%YgD{O+m@;3*vmAYfZ>)bud8y`$BRXG8J-Gf^B8hA|1<%rv$p;-pwV65spR&M**o%(iJVc;7I4 zW({F1qoo40KODWF0Bl-B-23<6RiOsmtC>{FmrDf(T3I>3odNK3m4^&2?Ym_C%@lAm90u8k@y5D%m8FDw9DfPVyNy zElSEc@nQ{#{h#v(N6|)3?lNbG@DLBLaTEs^cSQf!mHLVkIq<12U%v}Ig*+m~?9=-* zCOwf{0+i*Ey%-K6AYiJU_q5h<(;-+s!ekl4BrN|dJMaw7@HTmYn^m?r!fd7|SYJQj zhy0EZ(8v3Hf}@i8AP6{cZ)6GziXpIZf8a$T+CkHOJIjacl7572IP9d3Du#J;o)=V; zHe%1fE;@bq|F(;)timUw#y;vMUyQPLe*M~~TwnfS{(XoB$WNg?pU8syL@xZFk_#Wy zVA)?RBs^0w2Q>BcpPAoSeDLh%1oJPZ$+a-K{yI{&Kjluh%gTA3fZ+kE#_ z#cNtFuIbtbDH)HRU{Py4<`zqVk(&tc128U+tea%E+up@|{Djcn#) zH10R2lUpEw9~JJVgqEdyooW7pV36Yg%p#PC(*B2uy^v@kBqE9U3@JIR=V*Ay(s)dP z)2&a%Xc5Cm2ctc-LPY)7;|Q8ULdc=sb*9P$9b}~!Tx3G@h|7LhK`2)^=znEI01r-- zXbN`(q9;tgEoXguN`mQcHJk;KmwcL0#7Fq z#3{_D-W4ID1uLw-E&q43lEA}uVRE^j(13k_PEKa%ARf_&f+#j9!3AZCnII%!lJd-|}359#VWOvdtA+nrl61IPrgGQ*Jq9SI03fFsF zMjZ(-uH1!Y4{d2diPxH7NEIb!B#-{MIQ{q9$pmQ*KAS0q)pX_kfuPfVV_GC&>)t0A z@c-JnlO%(aY;U%X`nJyQyBH_v@PNHiJ84CVBnieJcT5lz6}{h|$SUqEG=(|c`lhwX 
zn1T_F9=61S-9j2Zuyf|j*YPdJ=3;9rNsN%pZ_inbiIEFN2wjNo0D_Pr^h>hT-5j9%U*gxc#l5&3V4b0Ko{QkFX7p* zIs$qkB<$@gPanSZZ^r_WuRPCh>E9+8+_27qKbp_=q-15|SdXcRUv@VdSCEg?Sg322 zzk~$~eCT_mfSj$EkV)8+Ow59{7$H;5PO1K57ORh0%kY?SiHY{dw2k-RA3U4G)k0=d zwqZEBFHK&#`}w4u)dK^Huf!GW6KP!lVJ#Yp+e1#=f@?ipNeQL)L55A$*|Tjt^DTk! zM+dnNX3W6_DF<{G9tmX>w2LzBG2}K7uZ`v|;*4y+Hpy3dq{NAQ2d&T{@2O;uJ{7m{ zx*g?!so~>m=EhPd!MG0r0posm!Ci9t7Bk8dQwk8klqH->FX$t1Msm)APb?E=q}cfH zf&vBvfHGx` z4l+b?iXwDOBnT!2Od!QuTg7Sz#1AzY^IZOWrDo?>6-CHYlEYRzOM}&PPek7Ogd>VH zqHj%o7Z?64hX4AHY~Oii}y(<;wtK?+a2;OG;5I)00)#vAYjyZZ3`hF7=`H z9xQhRx5C{~OG|9fAY;B|#;v%X6`nAL%1O4u_B^CzQy2&90?xM^vwL754;nE-9@5jW zva;Gg%rwM6eDAYtArTT9YPQlD46SLcFtr2pw$*a|14UWpW}3P6nWOVv#_iqsaLY7JQ9InmyB47eL$?VazJ;@OICE^Jovrw(g zq8ue9W#v7nP66ahCvy&i!vGtSlkpKi6p*tT%F>pW(ESG*KFQAhEM`T0hd(EwC}AUrDXgU<*jKN|N(K>h1Q zlY^9+T6~l1HdIFd6b#=+AL8IZ!B*J!ykzT`d@H`I&1^@I;3U6AJVp`z%1Flw_S(GA0vBMkMW!7>i%q|s zca5AiF|(CAU_HUyUoW_n^XC`oJS&`r!IYt)<4{~PRz6Ca&rkQ((rZeKkH?L1x}zk8 zSz_*C?Kn%|Ja2j#A}+|n!V*hUHVAOP5-4~#tz^0ReZl9pQcMh{bJ@f_8X&D)AnO^3 z=>7w>SSj0Sid8%!*3BY*89}2i-2$fYq?(_{oPaCuD@eg7l&XQJe zOR!B0F;M*3ak%M+;)Y+hq}&@#2zz7mOi4NYd=U3WL*l`*GqtA=opigA0*q@pU=L93Z+<26fkNpKPI4W?sP2KWuNL0{@*GZ_4~+aCw#!7AV( zas~+&cp>7J@f6r`im1+q&JeaEn1f7^GfXhr@k95w`P}LQ1+M-*wPbgW_H@ZWcGvQ$77gXNKJTd~Gc?L6Q zj=9vebG$J)S>vk9X+&C{4LI@Nbd8S>6RftY8p3MSXpJwdembaVIy$Q}N&8|eJUStT;G>c? 
zA|1RKh4O}C;rKnX*I&Apbt?(b7%hd{Mj9;-(`JAKq59=#$o6CzF-Yu~3=gJ!NevDE zoUPdh)6v)1ULp^yMVo8r=qh)$oE)XNr5mSR=AG~;frRI5 zZ#LBceG>^nfYa}!{p3`uH)vvmCGHXUygMJNc8pB<>S<=lm?IQyM;N zR(bQ%#r)Z(i%Rt#p1^$rWpm}!ybcW@6Csi1Q_K!(85#Qvy)I{SbigyHXm5ULX6oi_6sEz{xF(4;{;DiHtigQRk{ndw{0Mrs)R0EdAqqVln^R*`AH;Wm7mnMA|LO3 zY>dF?unx0&^Nh@GI8l*(}n$Q5knDwywu~^;)nRw}{w7vw{Az{NyF^Mxl6d+SP08TbcmloL0TA zvz~|Q!@LipLxO4-#I{K*cg5_>g*9slNGtplGwuN(WeYY!rEpRfd!4q`_I&pT0V6Qq zd*6<=92X2%VTxvpv1?M;-p=$t31lbyEbEsK+z~#~+?!uFMQA;Mykd{{F(i3O4Eb`h zg1N4!r6mQp3Cxf8yZn5-Azs&K@A7@+@B~4ZK$b5=?|BiOywT35{M#Gnds@hG?O-i__`HmL*3x`DwYC6Gk zz|V-3dW8&7;2+&(ksURK|5SUbw7sQ)TB%L!HEL>B!8egO9!C_n4Gc{i{b`O3b)_;U z6ho6$h#AxE%|?pwWnp1w@>z@BE5e2fRmiyGr-l_z8Xi42M(&Mc{%uD~$wRZ#X3`Zj zI2FW;TKPDNASF@~XiQaOej%ZF2hvF&_rM0f`%t+C!y_mPymfF}wJxP&&ruxPa*aAo zUQtL62UT^MsI>b^%XOh_Dw_8|&_;whFf0N34j2t`=)iFGKcqw33>@IJtgb(hr54yc z0Ug&N4=t%H+a0vz7sYdG9YF8FG)rg>JzB^%9`UpQmZ^AGyz1EW{UHD zTqF_M3D$vCx_h6U7C*G!To_@C2(-a}35y{D087rAXT^)tvBHKRU2|hAE?d^*>*s)iz)5 z_Rt@xFPken>!$y4-fB2nIIg|5l!8u6$`GeD|4ZC$CPAHS?q5%b)$#BxxsLgJ5B`fJ@3d?!(N0J+! zQ!j6P=jVC!C~Nq>dW~EzUrMjBQ?#(0^B-j{MraR5&8wr$`T3|pOw+X_-SV;Hk_+rT zM$vEN?xYfq($hQ$Pp~{@)f1evyg!S^X(vSUvfLj@26_B7O&}V7Yt%@7*hF>TL%bc2 zk;`ZoHKq3i2t6B~Iga}oTR9GC_Hi7ivfT9sZcyt!`Lnn~ z)~9C$tHTUpiz$LP99V#0K^NORYWZlviL+>WUFYeQ|ePVW{Cdb5xO8rFI4vd ziw7O$PL+}q<6Xtc9Azm4=sRBU9FUW2-#oxD@K0LMW^9%xNyOSw5$+5DEUqtxk zmDjyt9vII;q2oRJ{~T{JZC#w)^ct-?SO3TS-geg8+VIHiW4$*DY2I;DBX58b?EyF0 z`bOOFW_Xu3*SVfzIfND+_R5XSyE54?kJY}yDTI@&z%;PkUMs!+DzC}A1EnwNQUMr;K~C!EAr4w zJ{35y^v5<l~-CTG1*SDrh-kCexePHcZQo2v3FX3=f$qpEVER+MoP8#aU=KGGdA`6KKoj55oCGJ5C8RtRAPw=p;h@di^%}7^g+Sd#?((RGl zP~z2hD0m~FEW3|KTo`1!W0gZh)@MzDz!Q5vuoF#E$189*Vfi9yTs|p zX50x11G_5;dOo8WeFpi#L)3{EV@#h4>=9TNKhCD*k?(uIP^C1okNM!wDfMRix%tf! 
zP1)1vvE)I_SNR5?eeJbKxX50!Vwozu=iDBD7V-88(+Z`2d9p+=QHdV!J*bG6zeeFm zo*ZhB{?o#DAR8O?y@x=<#ihAdv1~UyF)MLjBz|53&6x%TEts&xv>ffXyXg1iWvQ$^ zMYT2&k;K-fj=oxbBbCNu__&mVhqO3Cb*O|dcZ7?K5t(2@L~3PQTMj@c7)P)N%D`iKg_(%5D@^P$=bB1pp_d-2A90 zD&#~b_V&O{GN1%Oal4)RE)@C*z%C599If_rW|IRZ*{%#z6V$#-b?Re)5PdZriJ4>x z1CZ->L7AC(r${(f*YBP?HtRZzu?VDz5w6SRfY4gG#LLn?nV5c?Ehg%UvwGT8g|h60 zEkK?cKd|%S482gis&OK3Av{bo<%c%r2LmM zhk2!VIX93el462v0V#o}qF^p8Hd+2vD=6=nfm*%eTAX^rw8mk(M00j&dUvIGa@_ci zpRj=wN+L;mfxc~VPcxtg9BqGK37}B1SxBS`xYA{iw?B3z&|4stwiAmLN}`&`x~HD! z&LD)I!%;?gxjAXCML}qKVX`II;T>Lj?#vjq8`5$C2g?eT^<*>AageXBzuN&ICcCWl zR!fgQCO$q7-2lhoai$AS*J+WI;dC?3=WsS+Xqwk%3yBk@bi6nnRP9?EjuFfzlM2`p zh`-<>|GCT2qxgziSbAw%cTzeKzh%BEf-)78ftv&sx|oB6d|U?cSqu@`EvKU58Fi(| zILWih-=&;Uzid{|J2OE;B@`r~Voo#IrszRGvVIAR<_P?N#V8rhjA|Cv;-6BrmZ+h2GLYqTm$+q-MpSaEl>f*jID^8Y!On zbhiO(5TR-PJGR;CYd!17?9q8f;W2V*zW{+0cX>fcJ5nC_FkjDY`>h3(JGVvl6*LLu z6nz7@3^B~MScYtgtS92w3Ot23Lwt23HsJ)yb|cfNrBdI&n>pkU;IJMkCC1q+xjSs( zoCdnYev+5cU!Z$3?^16jj;(pDoRnR)T*mBr(Nt!Q$`;kefV2l>!0Uoq`l26qNL1;^IU>aVHDt?rc44PcbvP`1&Z7iz}SJX3HIbw7lL^Qy#xIYADgeuMd!hmR;aKsjlu4wW%vj8)RyikVYWa$TAa2sY(|m2XQzVOtSU0^^ z13EEK4GaNMmYV)YY?Y`;b;1Y>sCf*-rkg+$Rq4A#l^4r?L(c8}nd~og)wtN66oVZK zrN+eXn<=e2-@o>k1W9ZXGU$rL^J#@CM7lxmUV#@(ApdoHs)GDbLgLjc((yv&K&bWs zs*wZ=&m<0;w4a5M{*UTKDifF#qm9nTySC14B~lh3DkpfSDAv_;&|toXSj4HiDH@y_ z!+)6`z^k;P8sy6%wquoH%8pA}xyw!$2-kimrxc3*#Gq9j1Ss?Md>S5#MeplU{U$&R5odKqEDL^jUS3+!u5`Y5T(J;PINIDMv?2e!B<0*K0Hl-xgEn1D_An_f4 zVV`I`@DY zF;}z7_>y*}!N~$zrU|Wk?Jse5w8m7q+tR>!LxdJQs_heU3o^Rjfh=D@v1`k~?Q020 z1aKD18E>p_TDOLziH9@s61q1+*dB8UPMK+#yBB{Lk5L3qlgz;ARe(OtHl#D$NEHPX z#B%`E130Ionb+D~!G=uw2V{LeibF^DD*`&V7~0ma_PxMDns(`I_OdZrethW-;9r6B zHLl~~?fo&wF_KrQn90fL+t8U8>_j|x6?9jNZ7nGQ;_9HHqaZj~Y_JTNfh;Qw4OU&0 z@`igK`?Obwk&|rO5aUk!R7+KE+>ejXZjJ*ot`Dxg@mkY0wq&W1{;^A^2w7L$k0an@L<*1k#q0xN#e4Zbl zI-V*jG9hw8RWjm9^N*#Dtu^}ho5WP7pGVG>;Fob=^N;M9&7FngRh4>(~-8yRrG zRtk1KeJI+rU|R)T)-`fl#fQvKns6(l+oN3w3;EFUK}FZ1!tw0K48VUI2m87IR<)3c 
z_}~?`f@<$kYsKrYN^){&PofYj#41Q$vH|!mhArwAPodDNkGJyXAAfm92GrQ;o=1|G zf*th^8&Dz1lT%vtGcsf=L6R%DXGZi_-xRZ7-W6p&-`#ePi+ z>YjKRie2^>Y{kB@qnabY>cZJR%VM-V8=wFReNj38f7d# zSL+xf{1+4_5%Kn9eMAgm1z&lgD{)FZhJZ=w4M>WD@O*!rmc7Z%6b{6H&OjeyzeD~w zRp*Yn8?N}sSp&{6d{t9?KUSoFaFm^I2;amg!tjW}C3^K8+bvtV6kW50bO>ebiL}4I@Bc`Ypn}vZPjfDO!0GI6gLo%m@qVui%^t^#oo>5Sh z4Hg#)Llf(>s`iYglfNTfl+JK!bY!Nn#_iE!8f}k>W`?{Tlv;WD{fsQ*VvKE5E(IjO zJGM3R-j7%UCjL8y2y3n4bV7wh|EREqD58>+rGbJf6BM?G!J=9f-JS*|8apu|9Mm2# zgAb^DFtPl6XEHg9o9!+de#EJ0x_&{ivXrvg);>s7Pf42N?Hx8zCEcSdi6ulLn%^9y ziNO8EKW*dhmv2PKsVvI%mELqdSW3&WK&R!rX!Q+8@l~!u>z9csY#~`2_QcXh)Rntx zt4N7ql;DU-qzUJoE1MzE`5%!veN4qGKHnavr`r)~kJcupwOGAV+Sdh~mXhBGI{w!v z5?Mk%bvvg{VylKDNV`8v@}4=k&{_=$?1b5B5zT9mhFd@7)R3$1Qazbowof_|M$)~K zB|W{!@-dd&;*!~pl4xWqmfmf<2*5EbQj1?5bA^9mz9xBZ@@}8u!x}|%M+VS@jwLYZ zGe!o8Tt~GJQE*VPUtzrF%)+rla{X0=rH(FP%?kM#B1Ok%8+$hBs;*kqyQ0LMTE(}i z*225O{QV7DfDNe&U@SPQWipDByM~<@R?I3_eF{pEF69ReuSNq5=wgwJv9&`Da*W8+ zjVa%Y#z$Thy}Oem1BRSQDIJ_v-J7Zq*2`%DXT}S{0ub*bwZHvO=n_?^pg4c_aPM38 zTT}t1+ez(0(`!9*7JC@>hV64l&uRB8mUMQ4k>H(vo7PIP@gm6XNZ){{^B_0-dh2b9 zYr}1lik8=|<&9cmo@(}z1aL4g5M0WoMv0ruK%%T~-(*6iP%40%%K{EWl8q)CjS14E zt#&AnP+&W1&0T94zUw(gJe7*C>&2X_I~k}YOC(v*Gd{cR&2miM+ufh-YG%zG81muK z%-9wLALC^mLoN$Z*G21dB)0Q}s?qXT*V`xW)0W({zq3Dy#>XUuR&hFz4yx%FP($xA zfpcH-31xA=IDbITOv@|1S&bIAjcI_ur-NR?1;T|3vcl?BMFZB5P9R=>&qN-PTUUy< zx>91Ma63WR(R>`YcX0ZJ_=2XSto0L-Ld{g#Tn_|u=UEJ<4dI@A&!g_n)kId>*L}|T zH$QmnPb}UvMSK6Shdqq}AdF^ikvV!Hb^vpkR{e>qtb|Uf){!QeJ>2cx2_y`}vWzbW4_(83gFEgqpjS{+zhTZzydspbsdlgE4_M^sUGmijIB{ITg-7%*JT*0nq*+; z->DzB{#FeH)Lx~ZWsw3Rkp?jn>zY5wecV6uL~~%d7(6C zh|P$gGyk@pD>CF07v6M58&tBF1<;HjFEuARYd^%cj?)~qnH%5-Hi`#hgnv=Atc<9nZ4>ifs zKzW7rKlnXT5NH_+q-H{N8LF-4wHWa4ly*y^KnJbIx$Ds}&~YXACaaP>zn~~5?BeAM zG`_m#b&io2~kAFPClNEXZ|IUKdwc{a7M_&*%rnQf0h-+CG)kETA9AWpv;9a$1k zG1lz;)JZ0|bhu(^jo4FTS;Q>Izhn1fjcUH`W$zw+vF`qz++uGgEkm6hwtCPUmtD8U zaykZ$h<7tCY!IExaWiTy-<(#6=O61xHFj)hLgI-@m+!@ZzotPgB`KK+WX$g|ZM0pj z+V9N{3|)^byUr`UzIWSJqfE{PGHaE}{UB}1welDI8`&Kw=byfhGrCf&g%#so{*YEp 
zilt$aQ$H&Io{{4Ck&_s2EHtxa#PrKMR0Ff5>UHvFxlI$hBWukjSFh2G1}Rkl@Ilrq z3r98_Xj(M&DwzLr6}w?2guWmnLolrYD)4dNt@!G1C-%gc5h|J>$G9r%pecF={+<9} zaNjpQirC1M`|!-STGH;@8ieg4zyZh z5t;Xdx6K$Pj<#ao54aW2Lrc9&D4{*7TLNDb@~!`$R?a+{sy1xnNEz~)qt`r3h@yU)OI#X0u`#Ph)chu`gCU=+;lx?&?YQ>M_tuJQF4vT~IsWY^)QJ`K};& ztAkyS*KKU(tJA+-+dFOa;yE-*aBt`8h&3jhD>;u%4)U4Q*4O_URbv;D@)i1Pek_1-^`@sqDrExm~Ls9#^rG?^i6(=NRP=$$Cy0EYIN-S z7DF#GK6YQBOSyhaci8A%Ge=&s{-8N#qt!D1CABXNqOPw8`%2FBHZMxq75NN%73js@ zx7S)!`QhetBIS7-c3!XNYRAig3TifKF0+*Qc;0}ecTd95>37|^a?0gh8&(fYc#ol; z_dVOQO^T5u5l&*Zzq>&)@*8~;bNi~2j-Y=*7H z9`=st0+Ec{&4iLEPd{)w;+}xK-;oIzj6n$A7uhr^O!zLA;wZj=S;y#fSjg}y*PfYM zk5pvw-U_h}1;z6%S-Mv`QAJUakxu=>A3S!r8;dfldVL)zO5>@goOxtupVSTJJExk- z@j9ivOEF}NT$gp2WqI3ihTmXg>N*$+KUY0odm*Rq_CeH@CfTIj!DZ#NtN)mxy9t^O zUEo_`!jI}2`#${Ajv%3MhH2klC%`Jx61We6G9hwCJ<3o5EFI|%#+kESNd zpQ6SDqMBSLWMy1h@3L2UEuLeMM?Y?09fAm-26)Aj2P^^P(z5&A)9d{^N++tNhBf`v z(>Ih)zJ70LRoEWa7K< zs9>8z$jv~0HDvov0hd3&4FC4@=%@K}9Z#QbzpL*vH&WLM+rb>b+!(**estH3lTdk+ z##D4G>89nA+js4&vTf>p@@ZHc4Bu?*YRtzkB|Z@vXgR@f=~3Ct%BK{8*SzxS=NNnG z93=}>U*MM;?FSXzLr+@YT;m0=7T^ zibUff9#VgKbrLhjK4o;!I;uS={UEjcM2d#=NV~1W%fb~%^7bQq8RG)|MCO{NucOYM zouV~|P31d4xOgl>+__YF5)d`-xHnf`(o6_^;GO}4SzrC~a~-Mi>%npgBexHE+pbKk zE+F2RSnxU+c!|j;gEz(~T_Nv|I-_PcDW^!=#dOa~83Xn)x4O8e+HM_^6>ASS#J=PO zunp!%5T;w{eXnXnY>%%sfl@br+r@!K%VeswSvQtH!2o*fY8us5d-a6VQ^hnda&TCZ zI%UfDH=k*EAF%7sa@;P&Z&DosuopYl!Vkn;J2Fu4%vND8c%MP0tlxL>3G{R{u5oVt z5s;?Z-TXAUIkm5zBX8*v!Bhvs!%mTaO!r zMMVkt#qz-_M`Itq#|I@?ikEZNaJ+(#b^={+IctL@-3cRSSuTak)xYMu|JfpQQ=dVO zTSi1CZu&(~L4=#MK=hrJwmYsLCZ6ERwKgmc+8vOvsa+q3t~iI)>9ueqb9G~sXn2U% zR+TenNC8tJ&)L7o3R^zY>2*GPJmZ|{kOQ332n819kvMNG@7q^wCRWShEh~LBU4dWl z)asd&E&`FcwN6H}LnXF>A$v1L{02L6wNsX_iSYG7LzkFXnX&iq6!#}H8-G);@-;=K zJD!(JnbI>YHydxX3fuP~<`3`{Xbj}6DdJcU*?i0OJ8H73$)Ypx^kyR^wbrw=im(QS zTd_Mg7vd>vKPxteBc_}KvaY!XnF{Me7hGdc_;+|1Nq@5fjt;NA(X+XAAqB&G!OH^2 zc*Yc?ACTkkBdI@s(U!l8*2g6(FG$unS3#WbgP`zi=>a5~DAu%n!v6AQ(*9O4>(UAw zV-ZM*y?@Lxchu`u-@o1{P>_Ar!qfU0CI&{`qc*p(vZa% 
zLTpAN)5eVhC$D7ml=`Cb$3N+=S5r!$@(t`}=pz)L`G>o1&3LhH-_k`zT~KKL)~CUl zcb8qRqu#*W9-r_#$YobTseBhC5t@Rpi2CKfY%k|Wxi4mKiZW!%y-<0}Y>m!t=sd?| zKRD&jDlB*CN2~vdV2m&ZtuvnB;^4!Km4;%hll$oS)8xAWl}YIOiJ5~Z3Rin4TjZ=^ z!*Jp=dwD`#?rM9~Xuz&%w%-Tdm;^p8iPz|`iuB;cIv*P2nHBr|mJ5Ud-P*pCPtNRD zMs+h=+W1+5uH$K|zqUK8fj4?7EE_tERgMD!O=v;&LN|t>V1#whH>~`oDn%h8R+yH! zQ;si)beLs%?EE(Z?}M9|s~I+5eH*(An0#n*kvgaCR{7;Q-fGw6tf+1RB^}qT;Rhlz zCi*Hp5j4-d%Ecsz2jkRWE<6SB77;0@1_Y10%R~fPY*9J<7GXT{$c;hCXU`v0e*5qG zl}fAddot2YDYD(Ac`czvn*AsS#$C(I!99P;15F(G-?}~{eWsD>DbRF z>pnw%(y>3}N>?hsYaXkwW805NWHTw7lyYVE)?o~Lc=_fuYr)x}@`Jxk5ISn8jh58> zr)~4ck0tA5bUKAbcMZx}`U4omaw7IUp47=Hv%xtjNo8MY`J)Oz!gP&{-VyGC*$n&i zB=^Uu?B}`u^+Tkh0qXGkn_o#yOH(J^e@~+`JrO>r9JqUg2i}a*QABCbCy|RA&7q0l zvq}&pA#Ro7gDp&Kf&FSYc^D|C38(Cc>~+~iZ#8o9{^d~G3&D*<22ST+6B?Q>4KNTH zQ*5wuz2+WAA8im3Sg4W(4g;551_Vz)Gwjml>W_$7*Z2_2*^oot!Vlf*EmQL$hU+CN zBx_^>KMXDVE+~)frK`PBLrO46_SFX1dL8#!qd%X+b}o ztfc$PeYH-j$e9zyf*XkdEmO{%q3&;66Y@)-En=#=A|C$lNGTDCHxFXGKKvzx;Aw zFkPtw!vH`1Mi#fktoM&i{hOawA%dsTM65i~3N7X+z09XyyKmEh5sP_LE#g1pj{Lu% zV_GN#Ts5c8nV7WAAD5EgyQVDRQB?XP4X}IV_xNDuW**fHMGU6a@qwwTb$#0q*TOdo zxLnowe_pPE`1eJGurEIE`Y(z6!U&-arzVE%G4%dOewsS2EM?HKDH&CNYU2=*L#vQ2 z?$dR3(Vym@h~y|Nw?aiF#-DDPKt)f``;o@t|McL2r9JR{<>Ss+!Ui)W3!!q)g7WVy za3beRYbJ9-LP!zMyVvKDzRE~ZtGWbgP6ge>#6(Ho^{e=vfE`|&8mG6B;Fq{;UiS#DcME?TE9IBPe8dE;Q~up zgxcFJU`!OG%lY&JY*GEv>pR)d8tH|%nKFPDL5HdnvJ`1^Jw(z^EJRMtCypAGG*eOv zD#3s`*b-W!?uiKr&mMBFmx0J@k<`ZO+JMDl!#FQ*zK-Esu3M^GwCv{;7zS^G70CY8 zCF591fucz$Y&}kIeST6_RCIK>FIU3~S|=U^;JtaK%VYZAK6AS?3{LOE$5>`ymlJ-1 zIJRjxXGiq6;g+0*M(CIf12Isfco!^4_4seg%N7mj3LW3}**NR0x`)X{*c_#sQMAz2 zE=GqMB}@+(oXGAhG!rAVso&n-F6r@67f~n1D*+5}7eSojfxR;B_6#yc9M0h~m}@jseS z)&T@BJm4m}m{>O!xozoM$?a5Q72Vt_>YqYATx(}%XGuoQcD+@I*(QKc?FDhRLMOE&FO@nLZj+A;z4%6TPP^x& z=Sfcchp$hyZG6g&Ni7R>xbQ$8Q>l|$IV)(argnP~uAa}AiFz}Rh^Co19ZRu>m0LEl zs^<|4&N|xd)*Kd-tbHC-I<0}Lg^<z;KWB=yzKW$1V<3}!d&Y?R|ZbwdhtyPpS1J@}DWz=h$*)2^Xj(T9wNX&O5|f|ywn zt+mdDX(KP|OK}+)`u!%hsp1z&PUG5m`3T!jL#Sz$S>f$6D*f)%Tk!7}y1swzLI6yL 
zriISb^O}YIyU$RdH;lzjVP|12bWC|Kji)@vF9>gf#yh~*crvzt!rV03aE3BpDw(%Y zCvPnjhS%ShtP>`1#twbc`jK>3-xhvxKJ16a@*6O-*E!hsJ zthjB0ghO{P8K)mtQ_2F*N{;x$T2UkRHyXgg<5DZbBPTbuOmk$J=M3$51=*_sJ=9IL3c z*$XUCD{~RB3}%7?0l?T&kYxLni*i2>qs`tHoxP^NsPdgme(_BcT?s9ECKrMLe0nnR zTW&WU^WKbnNM}h@2p*2zo)rsli+Z!DxdY_McuDpN_OOGC4B&Vgs)@zY;~pHaraJaT zmw)$O@(^oV)G)HGc75c4zJ1_O+Zj8I?>pcy3U;<1%)-#Ix)P^>&jXOY8$}Vjg}5oH z41tz|#cC5AhQ*VtQZ5ObgXK2wqk_$)`p0cBkrRIelg-Y1!PD~8Dv#=p(#!6)bf3w2 z!BIG)aOOAK^@ZP0tB^uPu}Qk_SKgJ3FCn$qO*)E`Ev3OS-^f*uK07EyhotIfklfe={DKgZ{&~ZG;;;@5P2T zxf{?6^nx}6O2 zNLC2M2p#<%URrto2O8sDekrX4qFs0jrHC*a^n83$Jb1-@<*wzn-BJ)42iFd&86&Q3 zcXMpuD6zU`Ra~XH>?|2i@hh7V#cuL54w-L{-Yi~LmOEJs;94CZLvz34z8Iz>%DyO| z-J2sNMrB6g>hordL&u@|h*7>DPJlE66&*I7Sk03`QlYX9;G)`eI!BFT7++>FLsjId z%S2rI-dW9Qotu(kO~H9V1${ZaKw0=Az#4GmH7VWXoiDn>=CHLvcv9=#jzyb?9)mN< z)_zn(>ROD>k*g6D-gYDHj6#@L>D^~@^C!12#SROG)1n>3cltVAwI8AcBbcplRgwjy zLn&dCn1mTXDpzd_?Efr|lR1ItE#s??zkDbC23t4TSC@MkNUv!Dz3KjySuelwn3ZE` z{TD-$%x)Q=IKfgD&2id-Lk2r+ip!xM&x9JX@Aj}&G~WB?pnE_7qM%@=mkU%BO=e`v5;P$tR-r(G5y)+yw|glGf0r?BPTS6@hVUdLLTzZos=RMFkDGu zl^Ws^^HN-OcJW10uuZAMYk`l$d-p>y%_j&jCw4;3mQ(&59~l=HM^$`q|DU5mU`sdN zF^q;7Afs{!r)Sgr0LFrZ)KqP#M)H7Qmnz{v?GI$ID)6$iNL;zIFGA7&$2^uIbQd7n z>2n9FebKLwT$fB!w6^9*2U0d8C#Qk@2?QIqIk^&P$hEiL0>2>iq;#!p!^6mWBc?I` zWxc&XxJ1}h6_=k^N{~358O1)1%AYoVFi)_Wwb0x5Sbtiyid4I^FWqxX+ z`Ls}du9Q{zHmSrhTv*v|^E2|!t&Z?7dhMtrcfYXG$>M)|8hsN`UVAJE4RXQlE4p|x z!9TJ1v-+g3)aSiuoA;X6{0XV}_33ha3)3x31qoDwgg`U~n*aV+8g$eqb$DM;?&p8` z;s5zBbhqxvGY9|iKmXs~mq2mrPS=cjJ^lZ71&Nknq5pBw-ydZ~PNIcZFlAfeUq2l9 zcj|Cg-2dgHOQ6=_gO85$?qSlO_w|1+K?Kdb{cm@T{HcTk8WGe*`bXUVT#|o2-5vS$ z|Lqqu{O7j+S*HK~;Q#Zq|EQV&6#2jRmjCSSKU>d#TH2p$g6!b`>E!>t&HiVA{bz3f z4@mYOxbg>x{0~a|4@EAam}a4IXk5JBya@Vh$x?%3ur??<{em`xq9e;zM>O-90@ z54paWzajDTJ?Iw<;{5Kv7i|*y^SIaK`u=s}<$f>vb^qY%OGB>G+NG$ylNWkVgsvM$ z9OdySYB! 
zD$m0&-;(#SnTCh zj|!W%>-SDNRbI;^R$4}r>{&;Xa4xOe5fr`$WB>V9_cZsPvk#v@LffM41wsp-HZi-W zqkHSpXS~$F^g7ifyk^e#0ao12*vpb5jWnkV|dm|O5`%$+6Z#H!SuEL#)sN5fBlI&;##SG7s^`v^6Rcb_0-Cc(l*r}8!4b`FH?vo0eN`1I zrXz@OzEJ7MSRdu|S3dGj7(FQcG;N#XvOch9U7(uTI=xj&DDZPe=-U3*$BDS5JQY-a zv0x_bv09RKoRP=5g)bxBfV*ULZH zlfs)mG&m34{r%)tE+W5Id|u9E53Q7I&(_l0NKJh4qZ>Jj7=s{tTskRh{6?WLJf*0p zP|1P`Qbw3F$Mt{^tuvj}zGLeLe44Y&nS`F$SJ#biUF_x?{q#L%-K68GM03A8^>Wt4 zG|5XQ*>?~xH$Bt+%yn?M7yCj%qNS3+)Q9Dd;tNFkszwv(P6YQ_iezqWk0fkMpq0iF zy641~|8*o9`;}apw1`Y!UZK*}`BB?uNjcTyPj>e<$KO*3%({-UY`*i2{=v~pzwz!s z=9Hn-XI!nrCSUax!JuPnDCTh3*}%gcnMI-&R}Id{+*(Q}G3PRjnAv(aY~y;@$o2Xj zru6n}bO&_YS|$CMBIp{SO!ms*o*>Tq&2hd z-&xqU>$l1@p0} z5IvazeTT@nNYxDLmm%zCO9aKN%o1|3A`|oVbQO)G5*5ZOVMSkAYHjzJepWE_*<(lF zB3%9Xc?6_rx3*7fn397YXla&c8(oZl3TE9A%`x{LG`kegOo4`v1J{=gBKqzQjSe?RFq@2cg>{8$k!-3YggY)1M@j*P_$f3ZV= z0B92zziMtPKk-})lZyLlN4>#(M)`x@utfKLD{TanK)Nkmz@A)IKOL<&7<3;U^^>%J z(b0%77LMsdJxAUunl8OnTx`jaZu2{tC6yIuO|Ir#)ngEO>a*^I^o%go*g!kMDxt z-fJ4wV9WUjoI@l?r~W~7YAh{hYU|J1DJ|YWoW(7ozl} z>vdl)Q(X4?r8p{9+yL6YY{ueH2?9bBpI;w$zk;W9GUvk`0~NYoe=c_MvpPFWUF+~2 zvgNpKjhljcimoGf;@=m>Z8G7iF)FO?B5k2v_`aH?2IOVsm~q#_tO3c@4SyxS}MI`Bdq`2`GsR>kfF7K zObXc#-yNGomD6}=buFnLy4$6R2V6^yy}~5SpH#Xy{?(=-Pfa=g*rlIEc*$;sIj5bq zy3Ws6;5@_BW&FFI!Gfl^5i-KmficXtPqjNZi9NS4s%dw zJwqMLJr)6yknY;I&!nC1p>@(qhlheu5%z2}zXL}jiH?uauq%$Tts~Wl;5DBERJsha zyv#w?_44&ME>IL7pNss8{U2NiQ{l~H0tp-F11sgz!%?+1&YBql=>jJKO|tyA3dbcT zS8R4)j>dERB5-s3KD%pJ*iBVg-pzjEP`6radBb+_7B15IK$y~c;O;WBGRAUOEH-lT zfq|iybM7Y0T>H`fy8y|=kJ-;2Hs5NUCjCGEGUMMdBXwkVnTh(Lj}_O!H+`)0#~4+= zg9hnt1Xzv7_jFhP=ZaduxfT(Hl0du6j0chMkI%haIa*zDw>+}!d(c?u9=LOu{xEL^ z2!U{|?-gO&sFH5A1DNWQ&&$7t z*?s<_Ap82&D86as^NBKE<*UJfYZsfLH$kdhmJ7e+15|;dU_kpXLvI@mBPR>yWR@oC z}fkI53fa{aOCToI+gS+#3w;DcC){LB5-5z<5~JQx(CV2Yup zF8jP)uRp}_9X^FWZrJ31RWaCj5iaUJFM8s_a!l#J-^08J@G)MVd^|348E-p`CYei! 
z$$tW%n~l$51{Re-vc`J+s`}@Z9^A-1d|UF5GNK8}##pd}f=ykT<$H5=(v=Ylmd&=0 zJMHn-`1`yegz}c&nav$dtZ)zO%0PdMa4%TUmkF`g`*R8rtNaZE<3-MG((ZdX*Y0&vm~VI5 zwM|Vn1>6jMvi}ZWNt6%Qeg9bJkd^rAy6Jqggvau0%I-jS;-VU)T8;`c39r4{msVP<$f-kWH_2@5#Tkg$hUz;ALG) z+=R@kF}dbFf3rfU$Ba{sR)H$7a_Xb*PsNP8^EcOmzQWXvikq?5)jNwU_CCTJ2pn<5)0kKhhk3j3uHKJONgzdnM;B4Y$KWd+%<2nqlD7p zG`e6&Fpp$U9Ib@4h}mDd{4kBO{cH zRaYLfl`)knt5(kj0a z5z6pC{}E&qSA;JYo4l7Gjf+Y7(EaYiody@1$%-7bl6$wDg$G{qvxjSC`)t=~ctZJY zwP|mdmUhPt9Vw+t;zf@qS*XeZ4KzJlO)a+)-E%EQx~iK3bXkA)(>LUV5_avg3fppM z9V-dFSZd076+*lOo-5<$u)#U!@HR@k5IrJ-N!qo^>{o3*6WfB3MX^diy zEkwXRF2_BT)0;_@r@GhXWIvXUk~6m46(4WzJ#Jtv%lit4_kQfw9&-jn_-aL$v4-B< z-qZJpYNA-=;hnDHw>y=8HW&UFl-zGaInw52+dz!CCtdOkq1K-+h`05==1J|3{B)^2 zx!6{=l2U^1+cqWFXz=$0B;sH!@VKn?!IxtNJ2T>nG*A${S3vWi5}2|`-gsAPYw*ubNO=$(*(Vv3nb&W zpZ>sO)}FpNX&#Sh0)C@Zhb+b&gCe&7n+2$*gu$PdS||sRVMlWiud&@g7RO?aywjsw&d&J1Yuzonk!6q&}pyq&jvacg)$an;(fNd zyIrWUm>bQ7r47rcmHKEq#EQXU_y;I&9wB^l{|IaVLRHbDZ)p>b zGqp}(nHmghA4XN92Re;CX8B72|BQ-Nim!j{o3wkTCU(XU-cJn>c4aWXOQ2jOS+O>? 
z|LNjlnt2@N^RhZCns3ugK^$NLum9Twj%JjI>*Q`aXKN5LlOf73R2OxeYdH0vi3M26 zz4mZKDQ}?`NL`&$yq{jH1%6#dj)xbWCZNrbS!K$|x31!e9uDchYfwyP3W9s5BTMGr zasw%mC^|*RkHzDCxB+j&5hMc5WHDbQJsin!W}JIDHC(0aWI{iT`JxsnK#03QKe^gn z2*C>%a#jUPjy(?^nH*Yw@0qqC<;m+-fI(1~dx!(Yi-rPD}0Z@Afre zmF8XH7duS>IYZMb0#gj7HFMsJub-Di>O|Xrj7A|Y-U(O;;&JawOB;3C{8ki(n^aQ` z8o6oHt=p}0KEH^=tUA`F`LBCNC;-3AS-1Xk^LD{KlM|Okc1Edn%fxH-1rid$!Tq+QN+||*j!jls_QJnu$aj1lTXW`E zicI=ZADnmaZ%qXCmh<7FjSpycxfRIK);a$Sp{UK2|CnD0>Lj%3z!BFYtsp8SiX>IL zYw3=EK`W}Q`lB+LQfln{jzPUP8JXVn9 zZ3_bm(cc2txW0Mp5{el&@&B@~RKXq*!?>Y(z|`>2R=f6eDZ1@sK7d+06!8JjrHEL8 zX_gGnrN~M;UevemJuw#i{Z@wM&AGa)v%Hu@z(v9a9teWZL;PvdEV_0oDm@UlYl$kr zhx-qbppQVQJBL?jaQww={&j4k!nBR&X>;HZ=&108Aars8!_b4j_PkDa7fm-mJo{VF zXb!rKO$5mm1k&A5IRLpeYT0ye%~Pw9Jp%7v#tIR}jN8eq>FygzUZZ0`Be)voinI!L z*Tqe!Uh_SJp4QY+L_3Z^9^Z5{cr8{7x#W`h{dkwkMdYA{xVmSZfv+n3<89i`DEwk6 zoxZ3GP|1hK$439mF{6%MyyG0?8n3 z&SaT(`tO?;$5)upVAGm+>Y`j+ju($Jv_vX~Z_~jKR$IyY@=k z>tS|siW%yj2O<V>aUdZ5WLmK7LbKLkyv z&62xew>7QT>D>$6Bx8KSH=cdB@$TXP&Suzv9sr=~9dclcc*2$t^in{kSh8YQ1>3hn zX#4o_@0G+r?cq=Krz5XAm4&Z-yn{{B9uN=ZXY0E>9OXd{YKNIx7OwphButitC~h}h zY=53yUvX`J-`y$o8izMA`}zBix1!xKunkI#`)}uSyKM#vA*M%BeS{tX!3Ir zr)i-|ZC$qQANCm-X@WW+G@;)6D?N*!zB^#woKG}ib(q!u1^Vm*SI{6b+^3)Iom;az zplb65>=zZZ4xv+FI5wVHc><1yEddaBcE!yt^iX&E@8N)Hkq8#jZGQ1B1I%q^BA+uZ;&W)UeGAct8y+{>Gt6PiM!VdamcFg6qNic)LQ!0X8u{KOURi;EOt00Ie}%nA)Fg`^$P3kLT_J z>Wf&xIoRRGFg?I74}Ufs42-a8g2y8(yzBm3L(*nOHDIQvqeG=zT8y~4_E1*7SxHt4 zuX9-s2Xjg=ToEw72_64)L@XlVi}w*0=i?6;#-H7=w@SabKQ+yoNn*V*C%E75p}X3) zhJ+_dG|eB6k;I$3>C)NtJ)^uQ5I9?@Z%(^B-+#9)d7oSTU3UhpRxbdIFy7c66~S2| z?C!VY#X2Qq!A7t|Z{mk`GYbm}BBo?V0e0*l*3^Jsz#^dmF z^7LW!J!tewgj67djPR->$4*@VWRn4PNtZ$7VPxhr(=WA4_Zj0Ow(twgBXB8mt1E%j zrD4OsKta0l7Z6On-L^W1q@NUYw+e5SYR!!vUm+KrEw;J&VjJ6I-c9cmG)!O@5Mp)B z0vytY6p6HyX_#I$eHsaWJ57b~VZ=k9x&|c{&R$lVOQT+XHQFX_%d8|&V=%*?$L?LtlmT_R_wSpt?LHPTzj3~A_WGJq7kjH+cA#@C}aGskjT zBU7|dKBLi+eLfJ%U{F45T6rWo`c~%BkWjf zI$7beULlt$oHIcFyxm37i=o6;S)>e&L45|zbGMEE7jPr~#{3QAPAW36)-Nh#`~i8w 
z{1Y1KXjN!sP1on})fajsxdf*aE)hJJ$!cOka~`iA&TU0KpJRcMOn81b{0ABdkFD1kj+1F2WZur5?GUPplZVrl(VoMrem%uv(L1Y^7K|nt->A&G%nO}k$gq8cSKYFDAmM|*dNe$ zePW6N`e-LK1vblNw59o;GXzCa<1n+)>u7BB0u!KzU;?KpHfMaL&bDsOJLg$4lW9Ny z#9jOEpZEo*tL|oHzW^;NsyT&KE4TU*c;oSgwnlmGvu+cC<`;!|arZO4i#Eou@o*@P z^*vV-Bh0;1Jm$O#h8BTHF9P*iN!-g)S)_Fd7{9Jd_>~iE?i5?|1^mYU7h}FqMPXi0 z;%x5VFq%&lcWnN z_9L?L-B=g>dt*R^VDuCyjh`ej_dPJf&(4`@34y6x$~_Eeq9q$*X^0J0NhU7 zm*O!HJ7_{J<;~SjK*XTfjfipwiGFafGv^0 ztVMEnh;PbTL-8)_)-4A0L$oIV^6u{Ua?4k4fEmWQYXc-$*mBGRsos~IZRP$)B^9k$ z>_npMw=OeKhOww)s3^B7U_5v4-G93qp6Nc#Iyd13pZu)HO!YseF zSG&L$hyc@_d-$wds!=)uSHtM)mtz2hbXk$JJ;r0%LH!ee9~b&MGl5WA-jPCDg#XA7 ziQ0Xujj|i4g!D*c0~34&_F-1i`$%-iqFn7fRFMKy_d2sf0!!aL`WuL zp-_90<&%q@`3+K}!LbJTKpdU~$GOHw7Y1ToC9m+hfL(ZWgia`%i{ty^3#Jo6fnuE- zumbFXfI@0gQjr?HL5NiCnV%y;qW*U+Z4#-)Loa8Hz7l}Zp_T>rB@B?Caw5v$| zwHCx01SW8QO@G7&o?ZMvf(Uw-pP~dRHvXHhyGZMXnrN|8HI@$#Xn;|8p+wQe?jvt6 zSn23bh_K2M2SP5G>iF@gO4!hO!bTNngDGBe@?$kECava70g~kK;-ENF>V?1Sovugn z+qZt-!Y`V@H{CBo`J@nvjIy%_zzLMi) zWUaXSE{2h|0Wq1B^r%(}Q@kKpFuG^u>0$=ld%JcyP9tp=2Oq_!pshaT^He`vPO+ zhJJfypOq9&1Z}z!XvB?w;a|@Ur+UjmYfE$W&_{Ju)ZJh{ez%9r*nP?psk3tW#^g$a zju|_A7fjg3U~_|@@uU-0*mzRV2a4bO>po`1u2NjMXH;LkD-FSvaq1*zj3tKE>K2J#&0=%!TW7lI-mT29 z7#QI&Vt0x2Zjf5J0Iq4<#&F)U z^gZZe2I&&zGv-g*Qx7;!dQIepK|W?Y>KV(vFC@e>d_!TcdXJDPv`8zz{kZ^MEaVD8 zYTA0_6*7?7Ru2+2nHQ`LuUuBCdiXQ_;hfB=re4OmKO+W513DuvUo%pxzH^d9S#4~- zTKX`mh8IgNfW$K#8CtS&Cf@q)!?L7dE(1aZaF){qs?fc<&EgU|LBl-=04!ORp9=n2 zD;iSN9{W9zV+iZf%Ktkq5UxR=#b|vOT}sBnDF`+Wl%fr@8S{A2C9_0Xb$g8TCnRr( zCTI(mmAJnUVfXDm#8x7yHi(0_t|A^Bf;okYLvbBXZR&~@-+Ld`Ul)}?K?;B10}?ZZ zZ_?Q;zFs~LZc9|(`U-(M1-V>Bqb|qX`x{9BGv0LI=M=9v%@Tza&9>Y=sc$eY6%EeL>K1NbJNYW@tzW7#x> z9S!@u)A*1i%rr>1YSD&4kp3)P|FUA2MvY!IeuTu3cskOy%7vtcS(^MBEAKwxA-UKR7r2`4ePKOrXz z2YVJ<9q?Y&EFZO0^RU1$k054u-uXiZwrVQWxG<LxW+J34+P zJm(0eTB14mfC;uN;MLs1L%pLgT!*f_croF6IwUnFp@Q-Xpi_B1?R)PzOqTS_{2+UXXV!K7qIhmVMBaBwD?`+hr%cyEXDo=@dv^;Z`c?ofSLU!}TJ5 z>{~vpWFZ^rQU^kM=ScIt3q57^Rl22U{!U3F5OGR&COxhG*}`w8A)6qE+krc)h{fOjCs4h|);5`$=9@KWF`4!f| 
zWa?ay;rz&EE?0K)o#Q-~M_J0;?Rnr>YopzwI)NM3T+&p1 zoW)Lf3S3HPO8a!Ft{zK|SFRsDG3mqzM~uv*;wvc$$cT$ci>a=m=Te(lK9l-6vTI%T z_Ci;eVRaHKjlf49-A!4qR!qLbogZ16*VESy8*e%Rpl3;q(&rw(%tEYJ4`wFM0dGW2F z;K+ACJ~V3YX_!bPhE>m4gu-h!{bdGaFXqv8jB+tTd6Q}@Q(&}4uMd1Glrlto++_$Q zZ;4ZPpxS@i*>+!_ra7N&bmjnN#dcTv)V0TjMF<7z1A+#zLQMDsAcR;YzIN+GzqQWE zV98hQy=*i|C3OsTE}I>!W<_)LI%Xk_xG-72N9Y0f_2s&Fmsjwv0lrQb2-~5bBf;{@ zR|e7zpp6zT=&w0i{qoEUHvdH?@AEtTz!r)U-NKyZdq4cFiiYBWRo|f0C;iY$6yeRn zJk)JD65bwB4Sd`vtRFV~-=8^`R-CpkIs+cm0eN0*U@g6tG`a@_m}u3me!=e?;gZvU zf@~@UqGJK`fHPy^8_>@WfoO>50eP)iCKk$_F@Rw=wIfPLR_D|cskAKF7I<}nhqE~m zPg@<0Jz-3QPi0X-OJMA~*AZ5;@J71gZ@1X|eJ+}Od*zptxrg=VRYDX(_;Io7W0rZb zt>Kvup5$I3JHR$4bA3*v7KzcGd_pQrTwhj_YU7jE6IS~_lCQG{)0=ih*Q-{GcU}Uu z1Q#h8O07!;KtdmS%<+7%85UU!w#c6Hv2!k2au)?6^0JWAyaEpGOBb5Kl^y?+&scSO z2_8uzaeb?H4%Si!)ZQC8EL7Z&PTx1q>kir(0C$56TU7iv5w>N$WrPo7*S}k((A<)^ zkCo50{I<8zB`e@#50;wE7e)QJ1{sl>Uq$uq(j+Lrtg?3|SVLIHOfUsAl-KB%e z*6J092g|>ZIAU(*C!Cl)VU3uJ7w?1yBD{)Zt(SxEn06Lqhu^!G3TuVr-U}womQ0Xb zkgixfVG!6Co-m3^USKlWD`=>FUfSs0w;n~-<@QEoiDo?+O&)1Z7e^P$lJS(MT4!5Q za-IuhkGeNy%ix1dH*p%kXPolHp`thM$?& z>L{fa(qu9l(r}p+>7o8lL@LdGq%HV+5BLYdM82$N2KXZ*nRoNJ&b;P&uY&7ZrP<;n zv51^GnNK*3OncI<_u>O$E(|gbs!PrF_x*?pTJV*`tITs2YCp(4KVAn3@#9mk1pf!_{^GN)M~}h$WOmH^&-nMlu33aR7UJBexDP z`MfPw{tR{wDtwTa+a&U~tCTf*gWM2bX%j-JoGV5kdxgSH3CX}moCq$bH-2iLn`oJ<()|8jO)qxdW zHMN!=5zW=W7&x}uC_`&9$_wkIdZq1d)8FOr5{BFS>&TOLnC!G zwN$P{l1|DH+$oZYr)?puQtkJ&U~P!bnGK-ez%DXeau1TdURuXl&YT`n-x3YGbTLAc zZ^R>sEy~!^a1)9OHg~&+IVk;))UJISwP>0(T76%BF@?q8eR3Gr3-KQGFfCN{uYhA2 z;tVvpXQM_>gJ=Zvnum<1MNqoR*LaGEgk#lA;jpked2Ma zYg#b>`1cWd4*pvtPgymtfJsVBDr%sC?SaOH4Q1ib)+eHKt(;beX%79{}%Gl1JfFnoDz|92=H`Ko1Ic+Q5jwECT8H2o)_01E{+zBWj?GT%R9FAb#e>Ei%>54;-FO5H}q!7@&q-EC$lS`EVvdtS0o9_)XH{d97nx4S>67M zQ=3Vt6|A#nkbL1JdJ~MDyU^|5JJM7YCwRun4frXUYG(h#;PQFfJUYHea^s@u~3ekfYfPjxLPJHHsJs4{w!Y^{|(;a~JlHrwnp6Gd`L2Myk{gXT%oWSsIzxTE z8^How!dF~e!MfwCdE>cabd~L@7)4w+wYaGR$6kI<5!d`^`)j2U02Jfd@ zetal$%w}W88PqUJ*}RUxh$FICrZw3Tu$RircJ>?*Z0{`9Ps@nUz|Me=F0sipH{4ub 
zU|(1$bJy;+vcTv#)?e+U!fmNMTLjaOrCTcRH;s+0gFx+=HHQe{Z?tKNhH=z(D_AKs z3@`dc(m*Ftx8+h*Sc%t)2vg0%e!;foi}MhU^@G78-q)a3A0koZK#(D-E%{|-M5#^A zxW3)U!Ye{%oX0M!42|wqnHxgu$3^Q=oVpR!r%MnY;zHbvM+UrWI#&3+dbwXR_q?NB4yR_4%CeBGTy66I;)HT^&&NG?d| zJLECQLrjp(8CUVfd)lBSYrlH!rM*u}uq1GRt2#sdGt&EuV~a8XAwiy#Xztcu;>3xPEKhSX``NpK`NDLUwas`RYoZiQQ+c{#uE?V}F&Y+{ z-!jw5mvVI$psUbe6vpH+8OWxl{y+mGAn*4;>|mOa<`g0X`PwPV!eG5{1Y=9$(76BA zMZ}8!icdsE%8cvTQH9wt8r~-mC_J+croXa4dJ}AYi)p`fzJ@8nxXP3|192*y_TMVr z5JCgk|J=!vlw4=~~~Mw76HQcjE@ zP)86LNfXp;scbm3oxcE@#(diJ?>C{STcP$$!Ax62Wusy_npK5XQzSeoaqoo6k<94e zpX;ru68BITOlrPa8U@);r1?+kNrc%gtYrXCX8)PtG7#A+sX`zXWkL4K3K#8>F2X7` zv?mFVo4+-SREL(FHp{30Bg)s~l(Hpq%V=xdzBP*dJ()-4TdZMNRP{vrYlffnjkLRo zKJg-PMTy_JDKw2=8b$RArw|#PJw4pm{RaG0d1}885KR>uF7xEp60NWeBIiv1mm*@a zp4XuDhApum+W5|6ZsX^?@^`t(;|<4f(sr_1K3KKyll9a@$q%@>#x{;e9*?-w)e?f< zWt7D?ka@BUXZX7CapDR44sGwH{~C-{ps-#ALo{dEs~%u~PlEfn<}R?8UN*MBD8PdK z8k*+)XJc1bpmMnChbFT}L)wyJ?+6aau_2NyvUp1083YR95-SA`)nRkjv#hy7dCg>8 zQ7mQUNro}vR$OPQQ#xfi=7F{&BE?rnmCYG+g-YM%m7V(U!P83|N33s zJZ%+zRqR4q|6+IKUUMA8YM(w;9xlV8Z1^dpXE@94mSmQM z`{kJiHYEdPDQUx2c-x3&#Dt-IQ@f{@oT2KX-kSzQ6hYdwuSHyeX%MH%v;XUbE-kq# zg4IBh(U7(tc`~ox`zUu=P$yi~4pfB6<%+Fxxt>he#wJfzE&3FMq9`kaToqPEA(GJg z@e|1X=ym^~8@K`vQTzlabQ&YBLH9L6_hs^821so!k!gOJ{q&t=f0Do!ovRozQ{eR{5{)Aq5#>dHR-lEYX855Qp@F_aIO*39X- z4Q*=;Hz&%ek<$Lq7oU@~>JmDVbRDeb5IrB%qGI^5jFK+p07;DmTqcyRY4&rToNP;? zNHoBaEcAnB=c17mMr{y-Dp_&3dq;*F=VNe4jJ#t=Q>q~P%BKR>g6~0%Y5>k3w(95^ z6U!DiP?%q>>Y0S~0y#wgFf3t{;o36(#rwvF+KK7|mM_+d0J9p)!P^s4rHb*P0Vkj5 zJBa8IVT{*M)nX;}4Z=u|_y*N!d2gXT)!iWy8c!A`!kSbww?5spJHW6BgZv|n@hFlU(k>IR*y zm%GNuwc$nSq2VhLfYp*DI&L#zmA;iz5GoUfI>8_ZBvAbJLe%TM!ymu_;bPTB8(h0; z3ybJY8gk#3jS)0|nQ@$94vcivJHh!XndBYoO~4Yo%QZsE^a!SLo~Z2rf3I}`Ouom%I31}4(oop^(ijSTWk6PXfK~Dj~At7#3I;WL`^r>5uSx!2kWx( zTvA?GFPP$~vpI#h@l|WZXjABA81bD3zZ_%r2M~mN;-QX>V96?dzj=LGM|A!uCmt}U zrXk{XP8dCmj%d}`jl!DIi(HAsVG%8fJYbtaOQvrZtN}D#Oa;uQ-Ca~Zc6XOmyBH7CBJQJ8x~gB! 
zjC_y{(;j7BV_f$EJeOPh-D`9B?3z)X1AS=z4xt%s(S4Tb&P%SHBjGw){Jk@~X?a;u z84gz1eMg9%)3}LCKR;HS(t{k^U2D)Bb{D)he=lqUS|L=ikaJP5i#ov9mAFzrk5;Ub zhVJJl5YBN*OvsQ~6^+{|7e+bSw{5Mwm*&acBizIE zpu??UcuDhS_l#KY@sEJuVKpz(>VeMJ1D~n}va!ZMF5^ivXT)TOKFzLu)77yCO|K!G zL56ovmW;w7vU!#IbscTn07wdA}2~ z$7&98WgdLzT_OxSzZx&LDcD~ZymamPta-xihliO7o26PW zmubkB$wiS!t@KpgtE=Zk4;SQ*zApaM_$RR|@q+NF#^6%O9jZEtGf+weavOZn-Xs9E zeR{1#!8#(;Y=ef@KkjI->)w-|d!ewP1)W5%!Q$`Az4e4bH7jIF zUi%|wOxE;xJo&1bx*MI~v;x5sAepr87+J%4@~;@sDNxnd2Qsnk@QE-_3M$Z+lnG=! zGf&<{4Ufg@*~7AdnR5T~hgv|C>ci0DmGJMN3ARZ%?~D~`Ie5$_)zVZrH3-i8K8SMp z(L$O%1>)U#qgfaDhRpu zxqOQBnKr71qt;@=Zj7u%dg4L zn4PSbv_Y(0+J#t1DV_BC%O)lZO!tb+^RM3(--=l=Bp=%PEBtnli0#Uw-nGa_imUMt zw`zzu! z`{r^R9}fm9QDk`e&f_U~)8(DO`N75LIv4ZPtxN8v7&2tA2YFay7a%GbL0xByJwa%c zfd}81TakN)j3Jc+O#?%h8G(9Q$R$o9z2`$T3QvEXf|PlUq#lMnKX z5`A_|ZqD^wrGSr1WgYaUGX#$cvEb0RMlAyLn_PPQmREJ!kCz`uxCDSC@1Eo%g0nPv zzw^V$V0y1R$qU`rNxk^IJ0OLK;n;NLZaQ!fi1fp8kjJdz+DD{HrbxR$oF3h*P+(nh znqUOog5q*ST2$($I(Fy#vYH4!@|YiG>`b0k8)bJ0?RnrXZfX@`XQf+YtF+q87N_yE zZvY&F{w=|IO!|X5gp#?@^n6`Y#nn-HKZA z0#?-X0e!Fhe;&@$BFu#bHtNBmXJy{V zWs}*d?4r8kL5M5IE~8gI#b>j-03V$7=t3tV(pI!b+8)mYAhGyAziyVXzxSvmI04a_ zv@TvlmhizvIT;rAUy=&WY;^7qNbzywfkhF9)&;Z!S*P@j4uh7&Wg6`#en(@&I;Q|C z5E+F*1&Jr6TxC|w2dWwM9KULP-4VY$Y1$J;+AAGfcl^HkmlI%@Rr?BN`R?~*3-z9D z(`u_`3UFTj$c@f5e{dl*a5W9WdK4a8B;~V3X@;M^dOKUXIQYh6_OD6O329buo5@HF z5J&htned|$qp@3>TanbW)h^f`E77wVZ}8ffFE?@MVz>7{JK2;3aUSoO5bOWqyriJC z_8=v;Hga@aXs!AaRwxuUR|a0Dk|rK|sX8UA9F~>#u$%{#L(Bk;j>s6C_o?JTjh?ha zP)F|}kMKCm)_1%oqri5Oc>tp&f>0?Vn14dS$%dctRAf80*z_n|RxiD9CicielbxJCdeHxRn7C4_J=poUI)3!X$cn4R76wHvlxu8>t_SB zsL7;>sE_PeWp9airgdLsU5KnFb7Dm-DC!HE4uEvOl|_1LV2nb4IK|7G=Wjbj!$n>n zrp{z|39*&T3wA$vOoTiU6!w3Uc?5;4X!;qk;6)4Jh{eWc*~A%KjOb9kMHESRdxu$x zF`9nggJX`Tf1mq#oIMQ^QB()dD+}{sXq@GDY*gN)?Z@GJD{LGlw1h0@-s7cBKdCB> zD=FS^aW-P~7|qRy@tp_6`+WQJHWsYk2KG=%7cn5_sX&}tC#Q~q+NHD1q=H49JB1#{ z3U)^ahyJm7BapWiXHVv^L(p-ea$1UL6EdB@vhGm@8FF&BAkGxx4A6MnuSypXkXM?% zG$dB*Fc%@rxKLg^3IPtRRfG|vaaib)*2mX=KY@LwQQG@t-cwwPHm4buR=s4jDK5mJ 
zSP(cF)t$Qk0wj->c%d0035?POr2OO<;l5OHpugA`0yBXQJ$8%##Ec5^?kd%n{s*S_ z@y~nSc6F8;l^rC*n2(wus#BdY_Xx}CH;c)}5>(!`wB0H@K*V9xGg4;v-jDW7J8Ib& zUTh4y{r8JbKDSOaoQPe7x=k1`L$XYi>)@$j*7j$m;9WuY6fb^n5E98i9!?T%gi5G# za(eU~Ol(dPR)((iqczUQ?ILcqsdnJxQtMPj7Zo=0fc=EAhp}tZ+wi;>8D<>YJ*(Yj zJlv8w^;#>RTj=gp-cBW6x)be7@}uFU+v)#9)_=!S{r~^tI686=jvbD1>>^Ueu{rik zR%RKM8OJ7@V`Ptzz4s2GWY03Pq7uiJ%1ns(-p}*%>e=)C`&}-ce>|U;%ah0BocsMY zuD9#;di%ZQ)jE<$+{+)HwM?X#*w$l}kekOhXLlSjyt7@v2hRXa# z7A+Ceeuz)dg{!nqi;sg}f%mZ`l>BL(G@wc?Z&j8F{IC}=f<~LtX z=~|QP7!_LUo+;b`>+5uw%~hwAv{tdtK0LLFxZwHHvVOYmcASsSOoGapN*QuM=K zSC*2+%^otN=R#@XUJ#WDc67%wOByXRvGf{Z7^k}2PR5CzQS03{<>c8Uj>T?^i_C3k z;fG4E`(nL}2~}!l%;M2J^PuFoty!FFEF@yyyx4PG{LXEDKm?CvC4Y5J7oXV(YuL97 zq*m`DLvPk9dptsYydt6E{n(xcZ|RKS1keE1))Gd(wtc>Z6ZcdZwQrgKox@zgr-Mhv z)Kajrkc%H-QOIjNVR=Ebibh<-U!sRT7#4@>4iVkTpXNthndt1k{^_!pqD_`uq^geq zim$bDRRWIc0jH4Zv)oFVLgYQOp5MLUM_mLR5+acMy-$8ncLgu$sM)gV!w*e{QXvWY zbA*T9*uLsYfOcF4r$>U79uqF%Uf&r3_3@|q;g9N;5P zLBAQdAgeg)n4iH}wpeptVXpAm$kN*0Qa@tEr582Iyz74ZiURDIg;|te=4mTXX460B zO`f@&W`JJt21KTam26IS{PFsg(BQo+`Wf#%nWZQI(J?K1Wo`TB9Qr}DF}k9Vl_}mO zT!*CSbtUYL=8SGmV&(C+0lMng+V-zrt<#D)K{XJNvd4$|P#mq9N4EmE=U*2Fp;~Nn z@(svU4mg5BBVfvKrIsV3&Sg&xGWs_`_$Lvfk4(TnAN4!PSJ28(^bZC)oUCa$L;R-H z!IYkmB9#p_!F>bLI6qm+a7t_2(>NFgPssV~bYl+?9~hIDqZa5_8&O7VfhH9@!`m38 z(iuJN_SHK^h_!p1+swyb(XUDlSA!PJ_1y@NI$;;_iZkU3%AEZ z7g=xRvYdJMGXytYEEghbIrf5S{I|zgXcafK7v*(t*pme zi(872zUs82lv`r67NJMkh+6i;yFXJ_jZzcr;}mktYgBH@9O&be|7{OBE+(6b zeuZ57o^@HW{iH}dPgj`Yvfge8Wf@b z^ty9D0;FXEq{{r-nAIsm4hB6n+bdy0Mf8Eh17tX=ieJaXG>*Qmk0 z{=X-642li0s2AQlk1)*fSW6#o4UX*UhSnz%(reRYQRruhB>H~ORcv_(9m|xq(QaAa zdWG}pxw9c$KqLO~rz|$8ZlV3;{Z*irC|ZaImNk5zma0tjYx?^<+5p7+k8KX=AsF91 z&AjD$r{wy&QQm{8vO28n`ID135)3Yu+z}iLcNBAtx_(9r7KFM0Dt?UVP`^Xu6X=r_ z6HRUQy{n${7NIC!j=%xw)aIV@Ih|9vCQdoRRUEt`OC47h69XNX>&DMa(<7rtjH*>A z=`}{iZYvBCT<=u(QB3c#t69!aP}E>&pAp}qqhOIj2H5EC@<0T;8*+Wt();K{)eS0J zmD_;Ocidp`kTT81)wpEy8=y#!t(jrFj&+nWw&`YcUFCmnPCMi;=^^=B7pT$m)_4H}su;OkzP>^~EyuQ5K&MBRiY_{iF-F1v%0Dndh`wWt$5 
zpG7)dHx&G~Y53gcspwME_BJq~an}5v_-`E&X+`40q|&~xeJOp^u(*G1CQU{TS{dlm8bjOa(i&qD3h_E2Z#k(_Man)_^-G>cKKZrNl5z`;3HpK zQkuEOZxTPbH=IfD)Ox&j1PcmReU5DAvdqAwl=ORv(M}GoC0E5AbG>~F+G}Zic!0ix zjf}3pII%hnymhA_8r1+C0zyRvyClAD;uPlYmK-GyqD8r`4!zEVD7VvR(H{yyL?CMI z)|i{gVhK}}QjUeiS$#CZ@<*n+%d?R_QZ0phdm9uoktf}jG2R2~Ln?#e?^LIMCP|ff zYh8AG@zLK{ajY+!emMyC<{t+^-FtJdY!bn#B$mj8nowLF z7pm3WO38%tJ*dc?&5=(5RN5-4Z!G$MQx!ixoeKXPlnIE&d`Y5};S9h_Ejs^!ex=z{ zy$Y+b;n#P#@af(~jzQ6Yd#`8qY&omTzdPg@I8IRDq<`)u*np9@efsJC+D<4lx$PJ@ z!`g5TGtQ4GTOR21-m{0rOpH>D#}_cX+InG%tMnJYM>0Vfp`6J4BcRrUZCeKcf`EZl*5>u}jWIu>TuegU4jjo-l`P}8{g zMksBi0@xdvhew*4)e2{{{r5Z}r7lWn8r>k-g z7btqAtUr+S1CEEytXIeHZX79_SwTbyB9K{0NsmA+OpwIdJua$f)jyU29}j86MarcM z-3_uQ{B4I@uUr<*pQD6Z^)y~KfiYNrYZxpE9v#^MKJy7d8(J?W8&>wu)y?yal`1^9 z;a)6p?JUxdd)?tawDHFJUc}4~qm``R-<5Q?U66^oi!8i zGcXpZrs;{=uNrM@aa&~3+(K=2ePPr|l}ai;sFwJh-74QAM(D+xpit&*-(pejG+%ma z0rVHqKFg~3T@&?*7`)7o#;tZ{2BfmS^Iw9gKI27?t6mU9` z+d37t;eB<1W}1%iGQVuSff9n$oZB5|$`;>EA4Xx6HS-j}U_RKj;bVTOpR;vG(%wLH zZSAo|W7yj0uV&zOd{&pBP3Drkec4GpAeDyh7_P0<%gBggC$9#b<9wc8DbR*m_K}Qh zNSXvV8AErZt$;JpSAsCPBe5;v;9{J6|nWM==+yliQYR0`CdoJp|3tPuY zxbK*`D7hnf=*7!ns!!y8$UjoGA+Ww`%cJrQAXx^Tibx+^4Ye`&|F6gmHc`iOc`*D; zXnfpbLXC%ZZ@PYvohD9c%Gf5Gwx??EO_zbj?f08biP4wVDM;^ST`6B=N9h!9;O|GE z`!goc4-t7wyzFN1d`p);p41n&put=nVpn9ZvJ>TJ`;8@)1*1Q`x|OSA6DzTs06;!n zHYp~=^OXR!(}V0gP`;GKUK10#fErIK3zq%Ytw6#0@DH^~vy=H+*P4Xz z{lSEzo#j+hrz=RsFBd4>Va$TBzCGtHc>hD0gt#KW?Q^jzxsR+K$7ulYYp|MjV&`-0 zQv-Mda^Uk_jsN)o=x7kvA^i@TX1dD~IeGx!=pHX!DD3%&=qE*?Ct)--WTv}RY_*q} zXziSV(PqfaS};iwa!m5+K1C22c|3}wLXx_CzzSrE0%xLL>iB`Shd2)*%lzE+W1dnD za;2<0<%sKh%smHTp?0mSY4L&fQ+1Y_=A8?+kx}~P*EEc5bJoANbL!kBFk^z2p8>_8 zRsGW`9I(IPH~h-P!)Hve_R#G&+i6A!Pth|rsb>yv!MLPg@|D5Y_xaPmzN!S%M(`|r z{_$JslG3~TtdY)Uda}Qcmal&sS1DAW9=Jy(b37bO#9}mEsrap&GuUs-4qj;4#N zRthVVBaF&#H%8FVDC#{mEGm6I_G#IG1qUQse^30kij)hZz6_@NGzmt+(g3jx>m#or z0me?v_Oy?kUVF&nmG4mR;Y}yuk#mc}HIeDM2lgO?*y!JHKgsb$eU|X3sD2ywPN`x; zJ3~tR5{ob|oh71;!b{g=Wt}XyEfq{#FX8NHMlzduzqLDkmJk|%3sh85^X(2x(0b6V 
zOJCipWrqU2+W=#nY{jcmcl<~dFkW}JoqX|#KTrR*z(CZ7-jM$Uk87JQO}NbX`V^Y8 z#7Y|@yR1FuT3L+>{dHiC!CP>fS3uCCIw?QlY%e#U#kQtuULuIkr_xc+ny~p7erJjW z1{@7&iG`60wq2*Im-h=Jc3T2m0b-ot!s%;nI@ZdV<2#oIfH)=m_KWy`qckDDBAwaA z)Ad|J9nW3G%j?JVX0QyAM~n&*4O>+OLDoFqi(5pO~kXH4_E+_Tc*)4Q$m#V@0y>NetYg{_! z#0eSJ-IynqD}!DI-cM18F!vEO6oKx z*s|uDi_NFq=O|9e3+y=gyxr{_c=9P@7Fc1Z0ByN=xBHzxpM)yV71zlv$)4g&)AA;+ zM+Z0BvOgsqU~K&bJ)W{OFeudj9e1)(XSui-!4kD3ywy21sNnqtl=9rMOe&W75+mtP zImqNO3Aad^IfaS}h*<$!vR++8pjiL5#FNITSX!-x-*>q2+wVl4)VA%}&pv84FLXnS zY+a#AdkLzUCFx>rv907Ew1)^E`tn9iTlb+Lr(kujgC&AC$hB*y=<>Q^rJMeDllEf?g?+9K zjYL#ZI3Z0dk{fE-`@grY=aAYJsyo;NH-smBVb3I`AO9$uuwRQ=3~_EJUU=v#BJ<;2 zs7rn3NTMO7pZXSc{VvX1ZAsn`T@_>Pdx=<|8-MTe^Tl5Zwmi445^%iVbn?!szMBf*9_UVNK{ZF7iGiksM zku#Cg?Ag>qyT6D78u&j_STOXI5`Y_DPfeYPhx^f7u(X^7gRHubo0SnSfI;xpk(Lj< zMY>E(0xTHF9=g6bJW|e9CVzT3w_^DCNyAlxK>v*{qQc3j*0@Mbg-dMCCJ!3V0DtSV z*MhYBmkBN>QQ|320{yZQs{`P^+XIXT@3o}rs;&aJv?s?<3YnDzom!{QjPW79#0Fcf zIdj0}VfW*+8rgBX*B0_^TRS?@{r-f_>*vOqzcg3=?VkA`dt7= zG<@qG!YtwRp?koQN3xyF2&v$$ln*KG&h& z*=66N&U#uQGdz$~u?6oQ+E*(ws-1?eoYUZ)H*%8^yjjzCZt9^AE0aWP$N^PIWNi+mX^6|X_SDfho00nY2z zYnsHFl81uRsB)}=;t=K;hq6{7 zXMG>_3C5zur;`^Ho+$R{Juo|Uay+Od95}>4c#$#)-uBo(CnE?Vm5v7~Oyp3JpgQR> z2kws-N2*004qrBhq736B zq18&jOcna&=6;;Gl8ky=fqdj83W_^-sO~N=V~Vi0#eC3#Jx<8*A$cpDh>Wp_eR8|w zeCvoe3m5AqzgE)LCUTz(-gT+&I^d}BJv-)9TRC3OnS>JPp}(okKK|s3joh9pPp&d; z*52okbn^xdEyX_3bh>30^1(-g$^>k3U5V@X#0hx+`|%dS=MSobF-o|XU1F6n`+Uy` zBF7)PTx8W3wW(NV;$e^Rl)oRM8cx7I$hDd^%Ni}S!Yusc7vge=%G||ZGAHp!U04}R z+2(5f((Bn<03a|xDo+*JUsU995?vzUg<3oA$@}V3q-C z@Y$^u(XgAumWRk&b!bs?H7GQmifSoGP`PF6U4H1=i~Ah-uV9^_=|E*f8dbq>*76Q9 zm+5fdqQ-3EYsVL!sx{Zfw{ThetQO^^$5H3^upf!K#^;>1HiT;;s!f_Ee7U0gW`D;V zFC1M#?wPUSQ~mi7K8B(VW(PA{b4MSgjS_Vx~!z&$+Qu6rwkU{8A_DN8ab990R{{ zChoQ=Q?2`GYA55mWk?h;S}-3PWCyFdErlOOp(%6Y*7-{dmrL^FNQQHo#8+NhWw3ak zu$>n7FS=yOilN^A*iL*})5Rcn&s#E%RWQ9@2VZ%Bxz3(D%{Nx!mKZ#iH(8Qi+M{~m>e&IU@k6ib>Y`LIR6wh_mOA$P%w0?w|ku8DSk z-)2LGfES60^6(g>zscjn%>%im$9g-2w5d$Oj=Y3>>wyJ30!T67jKmFPf{Qvn 
zi_(SVX@&+xFIc-6)R>8D?Dk$p7{)V+X@{9b&X!nK0q?p7HWdbVyrC_6li#%6Oj|{9 z1I=crfW~ZG>SD;BK`Wv1FqiGJ61)Y^Viyj#KARG%M8+Fx7+8dfuU$5~g||T6@)66e zVhNLahNfF@c<`26uS9d)fwNad|Ix$!0D@=ME5Ck&iN=riSHKxU>vix&FYECFrDFYZ z-HDy)JH-L7AJ5BL#B~;f`0$d3$4mKdhz-42k{e1D# zuRr8)2Ugnlb(0OP2M9u9;k7C2Y3G51319~Q_!y0Pz*~oz>l$9)cx{8X<_SjY z8TJ>BlM*{7g^}}!tILZxa%O2wU}X#TwO7AXEzqw?J8RE!{2L#rysZd2hI0ii-PS91 z!cWz?oy~Wb`b6jv-E(z#jS~eFi)O%oEk8;YeeYSt8(=N>-YF?XsHarQ2x8GN9<`qZLz}!Fw8J$i{_1sG@_)H{+htp?#NsMH0!b&4&_6qeGYF=~*r< zW;}5a>MQHb$UkBw5^!GG5H={MRI@DLL2oNi)&r6|DU0gjIX$}Jd17Y|R0qA{Bmd_u zX~EkX0nc8k8m>giNQVN_f$&EA zX?63+L9U-wWYK&pJ16_`vcfjB7*T2#TakLo7#%3y@%1~kcdLC!w0wc??>?Y2Eq(pU z2c2wQn$10cFRrV0jzV^HxIyXnBqk~e=nie`xh50D#B4^hZg_q(@(+{DSbQ<>_5z#p z3yc@+)f0j3tWuxKejw?J+GwU&ZMg2TO!WD^op!9WTjs2EG4|PPuaDHF|5mUhN?OmOZD3v!H z6y8#@|0}vd>D?Jk><3DZJ%Ch_C?fTwskDV{%O8Y5(%Xvi%TsRLe?Hv7EAalYf2!PeiMAV4UspV}v zB7!VVg9`n`p=2IYT{NcdW#D;$$NN00L$F(e`MBrO8`sr0S3*f>ud0WUAmZu-lV@3K z-DS?Udlh6fBU_^B1yfhwrpdA)$r*0wgXV*@S49H8y!&8PMXfUglA!0+)g9)#*Gt^> zdl30uNWUmwPC!Mfk=OiXBb~{T$IQMq!RZ2ioPi=Kuw+u~vtBt6p%5CkmKwwiBK?24 z(xBSaqojkh+vpP_>hyvrjv>^l%1 z7aOG9E@m;z|p3z8UhEG;a$8-C<9QA2@~;G zm@KD^G(g#GICLA>=!MO;&=M$BQL^*ejlHoz^+i-z5%4$Rr&>0iTpxd(kKGub>={k` zQ>i9&_7KaVdoOlTBBn=4FF+k>`3#?e$`E`V!q#CN)?nkzbB7rLkJWtJxbmm=^A|vF zvM5$hnTX0rzwsdH$CpWslHv4X?|Lf&%e}rLHLjukF-eSy-V<>dgPPah+dhL@|J%nV zQ?Tga8t`{_bMMf}N9|4L$tU%U-BOsmf_(gG_<09s0ulnga-Lkjy(e(l0JDUGt`#>- zCY+rkY5*G$Mnr!_w8jZ1F#NQPPa0r9?=-^qP4} z-|rFUz^|xGb5kKd5zL;;ma|4)L$Hp30eD`0sMYd`QUnXX5)|PKLTVb8U8%@1{l>4( zILqCw?3>P#5`-$wnLtPZiy$JH=Fn%wfp+RC_$_Y% zDeng@d2EZ*z(DX|>a|Q9aU==!X2ekFQ(?PFRn_CYt5PU;%yql0ctc1O;X1I@U)?l#BIR!~A8_)><1KnVNImJ3F0dxy|tjDRaK% zx457d1UXa3oB8N4Q~NfGmaj2VGgro6c%jCwWwhYA3-DOux_(E%r$dj&U#>s31(Hzd za&~&0(Lh<_sbA>nDY|qv^UBNF0d7c~z#@6P<3rb?Z3%5088i-8Z>Dj;nP7(PtjfB@`wR6pa7|HBe$G!B-X$?>;Q&#n-^L^4IjF>9-1i^nk(g}!lkeFfg}>LbIJp0`b#-{bD#USv8j(?X&^tKSsz_$XfeZ-osE2Nk-YcX!qg~D~zP~TimOr zSWwMnbeDH>b7EXW7q^Q+XD>Rh6W*3O@D|_kzOJuXeTEmE- z<*?ekSv!tJyr8d3XJbI0W*~79>pW5PB>WZ!KMUb|^jJ@*CPty`S9 
zcX+!J3C2UIFKDA>*c%x?&HJCMV1qBX9`8M%8fh9YWlDBSAA)RO{hr)|a5c{h^V2?kqWwwx@VmacJ=+iRjJgPtDHfZ&hO^DaE}( znEz7DaggJ*{rS}$zAz4(WkA7RE8}!h#&dB2wpCQvZNV_Wp?tR?t@pJa)FT`9dOdOf ziNl4&QtD_7H3fMXNC6t4a2MWA%4V7a8?Pin7~m|t_wkxJNZ@9#9vjg2L0`=`Ii#JD zh1u*dtfL(;M%MB%gnPj47y$rNgYogJ2AK#X3({MMZ~*)Vacu@nq5{915i>l;{pIZO zr{4PH&SYx2BPWo@@0#{>cMHeR*N$qdV04!yqNM6u-m@`7Ma`UVmqlL-fLteP*;Z0f zc?%qfni42n#jLbGrOU?-HvBpK;0m-QbQVVS zvq3HsA5-eaZgy8)J-(kzI_?5UnXW5?l<5sTeFlHH0MfoM^7fkCsQeq!z{Zd_z~Wrr zyLahQez2jf$^CV#Jm~0t0mmE6SA;@D)V-hIsNitN4+Y|e&lKW zksbaL=|DQ?fsXG0yY#NQ-~JYL?zq(Tp~+$9srZ>Jo{H=m!l~z+N#{QKVG}4WX3yjw zoDR8BfM}$WL;AN?!G=NVA{E%rz&J%+p-59bWxiKWuq0IHU9Am^xba-B@zgE^goWemow zsf2`tYB^FqKk~^%UzBp-5y|e|L|K?M*#oE^;x-(-A1Z7%#w6-kHggD8)s$CKwpB09 z1xncad7)r=*k>^P)?0roU{wnO7}Mh2%Sdz{iA9Mee(=tn78>Q4%LqHL_6(b26erKqiQWVYYby8PxI$j;H zCxVtc)0z!m3>LHlE#<(}0S}Rl&@2_Om7zin;B8!-oQh)L>4S)DeLc6|3nRSQo)Fnk zxP2=`j~x~&+l?w0U8*+`YAh}6_u_$o5|~&%<%IK3Kw=Aqa0m4P5-~T97d$YYa7ZE$ z2Kte=z%#h<>u-UPFFE>(Sh_NWIdjFUGZZr9JHO-jwi}d7~pO>OJ?Su6%jd{+cdb z{-%KxVzV8{Gfga~NgjcA)N@&Wrvve68*QM?+ z&_m=^PFCs1`jcXEn3zxC3?o1lkqnlHk+3|b5xi>_sBG;tpyDQ0E;KsOJGeHInerW^ zmme%o&IR+jUPt;q0E;yan!q_>9F233+H!W}#Hxd^Wv-Td!v6Oa71=FFGT0m$l{B-M z%)CYHrxJiyjcHBBjeei~H7y1)r>wjs?p~o53PcTi3>>jKYc`$msUEEQV$2A?*j0^- zoM=VY-(a`U%S|1p&trS5Zy$g?V9!0jo0ykngzqTpCyanHK*Ht<@})j4x1pS1s#<__@zWpgOIpNWGu+jg!tkgKi9#Z zQYW*t98@>}YU3dgD@$6-VJ2}q50O@N2^B{5R&5cNQhDJQx+oX@OA2`A5J)<@E+hV) z1DmiH-n&+0#o@CX@J*uvMPjlm1KkS?3->Aqp5wx)oF5b`K~8@R?63=Fc7C56R-RDS z@@QwFzcEsbu_dGk-n+}n+w~;k-8~G!GNa=m5gUvSAT}$ryUVUW1}Smiw8Z{x-iu%4 zGDf2fiB2F0D*%Ca%W)6Sz7!}5w#W+az6Gy_Hzez$#JEVH(uf;p`Q5w4S+dP=FAss(9^mg zKBDSYMLyqbCpG*|NT^ad%0Hm=CvjSk5`YCCujvlt*aI--`SR#0-QuX>9BET}J44rC z3zsOrDB!P|0@Ng5t!T**mPU$Ac9Y5W`(eOH6@28#!inl|_r9}pnsjdL7 z{(R@n(<{X)@hmT~)y54aFO{p_0mE93hKHm2^^`uHnhMjifZ3r28U>H#rKx`cV$eD) zNHP$Z>AIeE6B@sI7#mIisx%YUqi5m1j9b>|4L~|~_(d{f4bZP267g0%uy<8i3zY0u 
z-%|Kox3|n`>FT>iTHMyPJxZ*5e6#wL&1x652mDD0uECGBGDW9PE&t8(klCxBgrlxU{2gC$Z=aWjboKi2#{;MuQFNsiDnG&ZsjBDAD!idX1|u4A51H&yV^IU8PibAg@%RSNvy zS3sI4cd`(F^ikM+N}=t=8=oJ;JQrnE2{^H?1l(LuT_bG930Q3KW@RXu^)Wos7KezP zwyN$2Wxe!ndbvy)!*&(*>~p>UtCIFk1tFS+Cw#(;Xf&unh@vvP5-CbKL`A7o`9W}@ zjuwvpPfbKDhb_$wO#L!hqUP=0vetcL>mqWJO|d&A9BsrWKACXB2P*&Q{=7Hk#9kNg zA}RLQ#{>_)&dli8w%o0=JPjPc`=%3g@g(%s#eq(O%Rc=&tJGxOOmR<^J4FaeBWXVZ zTT{Zt<3R)B^%y%?O(H1`if>Ml23~#r6Z`_PH+L}jeP0i}7Q4f41fKbc`~IwQ@*czp z)0ZroIFaP&m+8n)O0(U+JY#LPz345r72xcL4L^ZxiAS1sUJxnLl-IUIzH@O#)w~7D zoG%K>Haxuju@M&7Ad)F`2%|jRZ3<5c`NQFc`H!y)u(sciS2zB}Px!ShzDyKnys`rW zmCd7@A=de*u$4;rP|k?lT$*bpQ@RGcLdq>Wb=ScLnEvl`Y*Ah5^i)*rDV6$(>a=|J ztM-@#=?cqSBP`-61px=$W7s0r-JSflLe8Lw!~3$ri^uSD?cN`$lN17@08ab5G`&Kg zv~W9!h%}Jjs``eZb2|0UWXLyVyy*&;*L?~UX1*?gCmVX z-@X>Dxz_E=bG5UoZ1n(l(R5srV)+YNv?O$brC9oS3iEuYF=0?fRGH2YgSHe>u4>IO zXqk|70yqEd8P;sF4;AI*Ri)1WJ-(~HNZ%4QJj<9(lk6mT7D}Y{<3q!9`&IRk4;$Yc zdjsn8`T{5`$h|n|S`A%J{Q)w?SVDfWU+sQH_86*ZztQg6O+)&C-z|JpAZfGVS&}j7 zKE(ba7n3wQMK||^#eRLUA4=(tAHrBOjy_bl-xDedjpr2(jnFz`0M2VoyBYjGfIf1% z&uA6nU~YByZLF;~Kx;>aB2UP+G<-z__<@G^ehD}}zGRJoB`H4wM-6EBY zf;xXk>@0!oZR#{097a!HZjVU?&0GI?PDgbw$;@ zv*DkVWd*z5%~-SB!m`X5`0dCA*H_bBGvT~_`*udpj^zk60=A51R*-;+0}!)U%bz{1?PHjh+e<_Z}+O&$pwjHm95#5 zp^2Awp;*>dsFBy-zeG-+3Q9Vo8@HwUqs$D^ZFyK?I%jB8fA_U50`&npt8F z3nqB7xJFy+hF2``qX@5L-GreVPL6R?9(R0)uy7^8EEKLV0MLy@IHu1lkzGkxm^rZF zjd%5R8xSgHWASQsp*NKoW5G(*4H4E54zoaD0fbK>D_hlt4^7K;kwWG599*)>ZvXX5 zJ!)mh0F9Ben~U>S>XF7H$ZJ43!3z+$nqpzdlVNJSClx zcMqxaup4LM78L)+6;*6Hv4u-W*n6O#<^WmG3zPmq>3a7PS+pdQc~M8vNF09MsMm6L zeU4@IuFq3nNN$*E5dgobV3&D4>}TEwnhuGFC`rwsKoKxKA`EQ+mGXqrWAHfF^x*61 z9k!=z0Gs^;MXq1#Jz$}T1Rp+UDF?@cg9?KTU;4!XG$f;u-9^%&=6tkv)QY`VYEEpI zM4mGLJ>%nS{`4~@S?kbu5nnmxBgnQ?r6RlQ1nC0dg}D@YF3N5T(Z;u#q8iDP49s9N2&ExC9u27s( z3kyOjV=nH9A@f)gqM{c=xZU@g;EE<14vj_&F3nqGZvVK@F~C>T+;VTZ{ddbljLtVE zD|W?ObG|w*j00CqUsSyu+x6v>#%p+c>`>~Wi*B5BbgTL|hNN;>xRPmSA#a!5%B|6+ z&>NwSrsLNHJ#JyQF{r)xP30V%i$Y}Y_HqB!UBqcd^(IkgN98MV| 
z6Qe0bBmTeCSp5*RBvGAU@vZ`M?4Z1(>utsvu3pv2+)@Vm1r35HYQUevYGSSa?>T1+ zhHB;tDFD4eH95)p{a<}iw8_A{ZXu(Vggwc?JnmxUm0uxkF|*`wBpYv4C`=4Pk(p(( z_S>k=5Y%w55YCsc?KCHa#tyFt+K9C!z z{=7(S8eCkKJ=b&{i5A0z+Hw21{+7}cPymV0up*ue`r4U*g5v=ghiq;s1`|}e)eSxf z>S4n-r4I6rL_e4#(p;pQBVrR?VvKM&2X>It-&q(K%4ek7mJW|3vl3>(+(DS4Wk-(a zFdH{<^L`O17bNq8ah$7%Q44@P%nu& z&R%B#%%io-Jp`YHS1J{!-KhI*2He7}0W4T|5eS+?-ZIaeNtZ$V4=PAvl)W*4oF#Qe zPC2m>7$f=E20^tQ+(L|ahoo>}m3Je5-QS^=;xHFzV04LO!odN6U^onYcPw2mFjtaS zb)rc7(XlGTAXmkdyfBhIHsEPqjo9W(&|Synew_@3MCKePjYP8`XI5fmYapc4gK zR1q|iq8xP$`Qz6+yWe5bKMl!;^xb%ouMqL3Gx!2?DP&Kdy%E0t77R*%_|;R>O9qG` zA5zs_0Nw8BsLH-`^246vX^d@Vg2TSp`e_VUTuetfUYHw6T_>8Q7kKe>XUre*=`df4d$ zcD(4>;pPKZtd+0mm0SXBpF^6(LAB2J5SG+|@nbLUTXil&nWr&VpGzIiU7#(B9aL>@ zslIi^lsqS-@LFkh=wI;oYA|rtwxdh{iESwGY@bzS&$s10s6)A1-H6nR)oH(&HEa^^ zV3sqaZQ_8CRFBVxRmN|2x7_plM>Vi&Smdb$*mgP0^%Em?{7=Ng;R?Y-pt@aQ_)qUm z$By2b6E|*p-1P1EytP>85@gZB7M5T?Rfgz`rk|;pE|VF4c#nXWA2S!rn-TO*eiF#U zuK>=kPJZIz)K6(yr0f6ATcjYkN4s2uV}J8n5D}%^?z7j6i_iT}JaH;4=9GxU7#Sqe zNIgy98sHF7t!&<;6_GAG`KC@lEq9r%n*bsDb9>=DxDYXi+nxG2pzbfq3&05-bPR=b zK902fl6^yvY{o-t4%!}yLI#aQc)yUl2mdPV55H`^olP2|o z!omkzbFH}UX`mCKT&Oeb5X(4JNyhZ?0#rrqPXn7^0gxE(%O+dT{{E#o6$`M2Vxrmk zp18;6Qvg$rj3?&hv3&v5HXKZu4X`;djotdn8J|@K0pJ7|aDywv&EpJf?_L92#a!VX zpf}O2G`v=BR9^sEHN@?czR-W)&L|!fkY7ixs3`t<2swgHN5%1HfNlE*uwHorPj@49 z)zWVihZ__|4g+!^CooMZdO%GuI5XSoX7d4XQC`ApfzbQ1cvN2-u%2I9UoT~1`Ww@F zL5VtW^lsVaKP9RHv01yH@jh}3pdDa|>rE8_6`Bh;WO@bkMckUt*xJEG<~P1U!A$sS z^(ujw^E^!!H5gXBud|1~8(fVHpNqxuh5)Rq1ZFLKGBPqX68wk%_RAJ}AYVL^*oBM! 
zd5oYYVq1X$hnf3;IAsFo1XF(Sb=QW5Z$NE0T%~>yK~$?VL>NpSEl%R+Pvz(5Z+d0} zX!|9T*3DL?uVS`r2X}pxz#eGA&$EAH2Ad4{w)!JjWdB1ZWzoiCUj|!)MVuD^vcg;= zN|Ru>(Eb{7@!}ic)?ZyC3Qcr;&yGjG44U&|pj7~Kdb*KypOc@D2H1d6Z$&FpVsm0E zQ?`#9IvAkXv!%`t}vGP4O3G25x(yzj2{%K|YUp*Ma2adTr zq0(09-@_*bTtzBwWg!v3;VIL7%%@!!;N2PBIPlBhYNKVJ+Uu{UopARL!%U{3vU zs}?xf*-?R_0%FjZ?Y8~6a6^Rz%`}1Rh&asW$0h{O>>NOnilAUl0>jQXfPujLD(KZT zX0jRZtim4BQ9L&3_LKedt2+yEfM4r>f{rK~UcKF~A75MnvoKxUt)$RiBs}Me9yc)w zNfJ<5+S-bMcVWx=1?b>u;0t;S&>I$Lsh~l{(>2E`z0$Zf^#6GmD)27q8FTc1{@_3F z0%$-~?Kbh4h={jfX{aD*2XR2!_iJ;H&}|;yKN?gTcIYLDC2X5ydkDX5gPGxSCgL@kj$jPq8GnQpN< zSF7u4(mF6_oqYM`4Dc600RcK?x__%&P74kKupuV_0IRy70xtnye|5lC>pG5G^70vz z^y9|+BN@}m`c)!RV9fXiaN#`yaI>X|Lc75aPx>TX#lQhG=1X?lb*esIw^i-D3enlH z2Y7grjQ?Y`T$xbtzbgSGuYf`ucQd4N!Ji+?<@lNK_Y9dSvw`adO6r2+Q!r^>dTPgT z^Wl_z-|NSRsgA}C6?j8gaRLgtSjmtE$|d%!O3+?XM!p3M?lh3uH|r5s!6GYlH!;H+ zGh?yH*dytGLW?&E{#dPvmODkEH+Ld^iwq;1z%OhZMNFHDj%S-GG?`}NdS$G znDquCg>)h7@Nb`*Z{fCm6QRoAKD|;HeSqU#lF;zxSx(RX6N@@0p_(~T#@yOj8ykk8 z2^D_I72g3k?R`YUyqdjJB-0kqYeTS|CZ!ZZMDqz)y#zVhl3M7ZG-rf z0;p>0i&zUB{;?;6j;KNG0G_fcm^dmbD%v_JBSWrtlpq`#mt4^3x~VE-JrKUuGA_6Y z;?7$F9wfo$2uMrNNG2_kNC=X~3nVtMC=#SJHnXjazh8w^3Yhy19(tXf`glm7s!UqY z`Pf5Sz<|j@sU85$SB=kk!a`yVaxC+9WGryqOa{?rXiWCJs>UEP!4?`Dp>QynBVDYD zt^?^JlXg(rndAeGvPe1dDe@7xGgOR>SAj|>5tL1)+p=g|+~x76$5q=R_r6{Gds_jA zxVI0;X9hvcbWc14Mqq;g9cMm?xGCVVIfYBP*Vy#Hu%epo&6+U;$9(Uz(D8v66jH{H zYasROb->uTIh>t6o+G-~`~7LX82A{@RqzfAK2^Ew=Fk6Rcr#_Z{N}RzaF<_=emi4JgCPMCIn@3II=%R!5Q? 
zAC*zJnL=MLkMg^(Jhi}evT^}L)*>*QO5(Z2;z3f6_wHul8WBhpRKcBs`|EGp>aP2H zB%YFc2@s2jgj<4P3XC3|x%T#Cve3}>>t}E~jfJXu#Wax~)5!SC!HduC9 z9?$?3vA%%;m26kXEiM={^R?~vlNy0h1f%2_!=CW3U%%dfl-C5+`PgVDZnKpwz>*D7 z#X3VT!I3&3+H%ld)lW!BfK_Y8#sbknrzEHae~kKA*ZxgtfOM?0cmy==&EYEoCXK29 z@G4k*532`Eo{gix{OP_T^didS8cj;&Vu7`W839_91&Nl^&W(EtIw~&Pw-*X8w0ysqp8laYUtN=Y>>&?GJ&frqt<|ax9}7AlV6L6M|5HS25L=9UfzjQQCr`#V^AZyiGj5R<6xzY6aS0H>(UW0pQG;x~8P{BhJ3q|k zD-d~zZhdMVyUlj-bU0B}n}( z9Jh`JzKkEw3i{qC{9r1KLx~X zW3sS;*mbZLH)H0Kk0@ZClsoA|drP{yBF!>ELYwLUBrL_~v%1-k0WB^`MS>SR7Fn_N zfT-%Wrm3}V=_>YfV_f0vcr5T^4kdm_49ZDX|5mH&_VV8u0rij>rf~ z>Yp7*X03xwaBI|=i@#6*;Q|!)#}?~!gMJ%HFGvyqNeaX=;|se> z^Kz$QmAim?DM2B$KK8Z_4F1qw;5`yA4=W8NadCmtX+H4WpO~EdJv@{D+j3v>SWR+J zA2BxIhE*?=n1Uh=P{QAU-lP~z>A_3Tt~oUx&AEC1M1TM*<2I7`mk-scN7)$yv!XY1 zLrXY)c)luFz%dsGV+PcC{*}a+$xo`O$&^tLOhm*L#3d{kQSs zoKAHRafHl_gUX1?UMG8$J&rxgOh$GJNn~V`8L7-Nl9@dllocYgl+3J#_}?FWQ@`K; z`8}8Gxvpn?&pDsZ`+cw1{kmWGe7=me`y<6jjpiXyuLTntrWkAL8&^I#P2YhA-*_jQ zQgsxP0S`wJm|QRg*<;reXBa|pDzV;f8$%-S61ky*Tqa>%=KCk~E1@1lzl24C=OO7W z1?oLw-@vC^BxZ7Xo4{kBeq28v$M)fc@vUIxM>$zp z>_civQt0aj!Hr5D5A`A1z1-L(GZa)w@_~awmQCm(kS*XfetLHMB7;ekOU}0+Kl0yE zR58Q4=Z7-3&*Q_gr*zb>>FUOZl-vtIrG*ocn}C&5*6&Hoas?FXRv}t&0F6hztwj8);aEAVDcaZj#W)n0tS04PG6ZGaxMrshU1w zA*;^f`lfXC%%M|)d_bv)1^p!Zk;;<30(0}tQd^E{7I^z(Z0);<_4aXmR9ku>q9NhR+zmxwc5gQsA75 z!SJioNTt@mWAp8#3_0&$BHBZTa>$fL4M4AA4|#dp*dPlofYKpYRnDSCmdR=XT%u6U zuvjpItTn(}3HCt6)VzD%De-J^A6^)A05i=7qI4+1oWPk}sF5bXz4t?v^zk09AT49TqHAF~|~G6f2sP;fO|KVHhItd-}^ z5Vs}M@FOFU>Ea%-2*V0!$Y2sGawy1f8{Y)QJ!ji3WTWx2*Z_RUlKC`E1UPKUFTlF6 z=jWehyzK1q{tU02bUP4~R3UHS7m&C-cl4M}PI37Z726HJjrq8>F9Y@y!DMv&cA`Q| zx4W`tAp|EGK*>k{^wOA_*aNjBb-9AiPG>t#rT+w=#DylXvWoHEy?dOzP8q?$@o#S) z;}!ULyl3Pfi8lqL85yVO=1Bgg8Q$c?mGl6Vf2*{6!_MdCw)xGK?=q@_L%yuC$vWk< z{pnhU>?vdpgI~#svTI4CAl%mFlx%J*5A9g)W+bu)g&`wKQ*QIs=_UMOaanS$P;MZu zeTB`P$4>pml=q=34Gz<7z>;I3g92_MlU`7w$H0mEH46+XAs!ZS4)k}IIsQ5aJSv-C za;9}UL&^`$NE?SKze+eoQ*_Fv&z6eH#kon#2@qdFSB_I<`bM>)pg*}&9~Hb^Wi z&Or)MB=(qz=I@=Wj3kl`zBF3L_af;;l2Czx2FXK&r^ 
z(8DAh5#D5(#4)s}Y`#nHQtI_OMZ4k)GI-2Kz7n#%DM%aBWUiU5oR)RDkxJ#U+~@0bEQb<> z|BZ1>3>8g^7XJQ)S1HGp_o30B>ZxliO>(=)wP>6bBFJ0aigOkF?KjE$4JOuG2H6B; z?OJ|L-72I!G_ll}41hfMos+hSCV_#ot1y5k&yXcl(%w0RI1z+`#87GTSsQ8OUg*v! zs$%5P&1How>d;5$h=YcJ{?#JQ*2OzK>+yeUnc7&Q<0s@ouj4Cx*Br#Id@Bac7&*Bw zOt}UQkdF&b;o*k4qdK6Etk?ZG54b=^}G4#BlWO;l?WO z4mF<1e0?_aCC^q=cS}o)-Hm=~ZViWy({2V$ofttH%ihCZ?zK&gw1a>>neg=ELLlkf zjixY(Iz<2M>uKAEkWQyzOrjzaVBunJGqafgGV4bevq{|izTy}ofVsWn$YGZ4&G5>L zH-G(Rz&jHBl~Xr##(&7M-Ep7kQt7*Ms?_ScTb0X15HFz^&w94)q9P8K=+Twi-EtL& zc9~iF?T+P8*;9ntCVEUG1v^VxY!~DEaR20e?QFCSikDmJyE7!XllaYaiK%`;l3d^1 zilM^tE7r<7wJZu!tkSEq)HZb<7CCWm#hW#r-K5&4Ggdp>Mnz?1Od4)n+x0C@UFlOg z4f}pRy!(0mFddx)^)s%g2D7#XBZfw6cH?4Oyg;7j9^Ksqa08G{!Oc>=HYlL)HBt2br=PCP< zBuuKJRxPyp)_T~oybNaG*#Lxa{3@!_jNN0OTTj#S zDDcCJRL|Mo`#BRL#TwWG!AwxleueO_d!|4U;(@!!cEDLqbf4A)Jz3xPm6?KX6<&+) zl|r~B$Zorc$tpMVxN$IOy{c_{brWMr-R@426mpLWb>Lj<5ys0?DUnTBaiF(VKCT%_;9dKO+mFVT30k2at&^T+qJZU&`lvnCM35pb5q_{ zLD6!Jr`5oYyS!LIYeg7dxkVdc2_duqm+YHZM$L>v~e1bx>Pq= z#5iI!PTH5+#PZr}|1vSj>PhhT8>wr!2y->FE+bX$4>?*&fTBI?DKCTMl*$b>=rnf` zptipF5ikV`6aBL0h^?)&gRPmZyn6GuG6;A}89)?f#qT1W0i3w-vb&WpA$vLxtbvpy zO1-R@6+uCUTLY~7*=6s?&|$xGXdSBoMe6EY#ZRp7r|he!*hT<_(4kYwlmIea)zji5x92+2Zdo}d(PV=sB-PEnj%?(AGH7N9)JI6_fOSbfPpcv zyLP0NOyZZ7DNDW9>&1>x@gF!t&iM62(=vt)E6XXYlRkVXyYVMwVx&XQHwmAEUWF=b`$JJZ>G_P>63TT>7*tPJaNl>ycd+D&Y#j#o!x`xa(Cmvb$ADwI1DTbHX0=^t$L8nW4sER+i^!V+|BW)2eMAxnz1H}UAod4G$^D5xw ziWlGaFF6ELWCFVf#$28`(jncCMwot!#t z5e%qwkxOPYhe|A$54iTEGP+r=&yE^nbdKTi6SHqjup)I0Qv*u9xQI-mx@$?1Gp{u$>{W_^#2*wZ6QHv#KbGF_!)alvv_ z6GJXyO?$(-N9wn4j?}puE3};6epQ7@V&A&R6ot9dRv-B~`7q7>De`GS34yGBN;D*! 
z{Po*gHLSO%fHU#@^=vS@+dnSg2FYRDsUi0n8(1MZds0d6>L5rpIGlIbIaRC&OEbc)ggMPo(x~$M&DFJ@v~CC9SSu~8Hwzm zexd0=hyTd0+@OZU=bkX@ihn;$dE6e;FhLKL*D9WwSE<|(Av~_s0^sx&#MHyUb;d?j z4P@-&0rtUDE~KXik`m8N=+3CT^PI;yCh}@OvC}%I(TF4~8j!5ai!Cw<{Y_S) z10qRa_E)jE2m~eu9+~p|2i~P-^$SaLi(W>iL=F~!znm(Ip0-tP@CA2obV*%|{clkT zzTcad7?e-~(~Espre`v*_T=|-s>WN`33W3fbxZWYpReEK*9n?0h$%e*RH_6i@teKjR3p8%|T=fyUkBcLmVzteWh~) zz^*JL0=4`bfmn%Cu?YVfghdR%Xd!9T>H$d4er3S;t86ST_PE}09{}Q=R70Afr0`}! zx~`qYmd(?Xt>L9W0_#i@D}#hysnoVx-318@cag97m!%C)pNbWQLeK)*d?{z|oKygv zw}L7rEyJf4jX|+E6>MtkL@}Hti;@MvRXuml>^UQtpU7vdipK{$S$rL+@X&i0yuB#W zCh3H4_~%cUGr&bzH%Y;64Z83-z)jaKG7?&T5hKUY_xJ?V-sQ*WDN^CNEz!JF#mo94!U$M+?LmCTsOZhV}V0q z3~hxE&`Z^R_#3!8jH05VVi11@^QF%&K%YU6=l##0Kh>ae#@s?ak_mYsHviW)qMdvisYyJaFS;2C09;C{;q2fo%pb5`ey zzyF<@H}43(eyHJMK_ybz?c>X1%;J}$8yfIM zW|6m^?1Vfv%8KJ2;FMu~qx74RM*xAJ=k)uTM#^-IL{bQTtVU$Sk^l3=9I*m=1(4M6 zfrvspfbqH4^W?*@Jr6aGfTqdEC+@nqC1sRrfp!wi003*YfMyQfr>Yxrli+_i#N^`! zC$sw}8Q%aCL(}hn{dAl9;ZX%W7n`m$L%EsgvD>eP(NH87R56|~t;W&un?!@Kiwfij zLP^$eON*ququ`=8oEe867=qk1egQ+g5Aqd8_Zi-zcIxc^adcaj0eA1_&L<%shv#4~p?jDR4H?!Wjj__K%XE`Z720a+EKK?Kb; z_P3AClb~Oz3P}>^`_QN?URi+}4XtOd6-k@bkI4)k)Vzx)w(qYl&p8_tG!g$^tb9(0 zM3RbF@^X{EMRQ1GPwpE>#7KZyKz&49l1i#B46RIr{sf`||2PKR5*>;%&FG08Tkcsz zQnE8$qN3Vp>3OunmrQ|<58g9f%wn#S!QhQrsnqG2n0TQzZP0FAcjf!1%jH{n4OGMN z4d8-ofDah7O6pQEW!Vd^9|!wT&K6kox(79L1wP~iw8B1Y`PW}>VPT0v>;jn`mCH>P za^xahZtB;rbg{Gt!sEB^-wnN2nT$+OfYBr1*bo2~;GY%00tr47a$HC^^NE&0!<&); zF7XWvn0-pQ<-8xLlj4Wo09;bX}n$mb$}T3zZP%-H_t+66%1Lw6HwCC zpIsx73u#m=sp#4yq?$~ZSWo*~O`52W11@k9SjIFA=N1$((ZDwH`d(5gx5&1X3#uhq zy*Vn)0)C5kRG|iCQM35)JfZZ;cQ=m;q>qdkB)}hucO`uY0=H%;Ab{yW()Ufb^4o>K zk+N7g5H{DT!5BI^>j{?^M?N)&7e}f;iDbTm2i&74K@fcH=gDqza1)bJB}-d}oTVVA zxl$4H80q#SO-|r~aJq{a*=2|w!YUsC3#7h&^7cQ#Jd1k6w6i2|RgeauTt0ZzW^O*J znhp5|V$($uk+_)@?%WDco2a)OJmGXW2_KBn0p(LzYvKs+N zBY+v=)01Xi_lUpuVMzoX{qRy}&wWHG04Y*Np7|MEO%vC9p1@b2PRB1i?@tP@TH}#< zJwHsZBtW_X>=IiWILefg}zTvexFM@k}1N5r{mD0tkvg!q?r|z>et@v zhYbu3#qb@!jIa&BQpFj8MPUr%+tDzVsfOq+AVT^zrWa$3DT5Y!zV=fm!{y~= 
ztt5$FPVcd_(staqGo0?I)E-IczYexCrs2aNfnMUm%ROCfu%=vtF8S!bAkzCzILf33 zu!NjTV|8w8vwBsoQz3xnKZo_zd&QVx$H`t4(t5t#$R$O3Q6aQa`0&hhXdh3?9r64k?oaB-ZirL0S>G4&J)Q+^Zi=r-K zj|f%TU5ZLCfXscbiV;q{2qok$<~w$!w0)h^!c8OuCbNJ7!H#IvRKM%}@hwzi_{nAA z;f=oW4i(7Vfycwh=Jk)8d#^$?QIe|~IY|W0yYB{<+j2QFK>2AFsv*ZVJU1(1=n6se}4Jg-`GmbCGx@lz)xeWPQEtSMz6)+zr1nPv}=tpasu#olWc z`2sS_Uw^v`z@ zk+ftszyx64_v&#?xdcm{1cp{=XjwF;MJ_8j6-cojs)rYdpwxyno<$)O&3qpg7a#L0 z(WVsle{N{>r6Rs!TXGI42e9jABr;bW`5k!FGB|C)I`ns+zn*4^9s0D2^76_yHaVmU zlq@kdR8+vLLqs}A5SvBnwkoGv|1(CEE{$X|8(!zdut>T)5~jEnlt@8K}|7=DKBcr6LfPBDM>s9HH}<}??7aD z{d@&UYH;Jn!46CnVqu%i?$e!MB|2mGD}?-y1JbA4vV3`%WN9M_Nh=^nt+hBbm;I0m z|0e6q^?iAMo-ndL>$JJ`b5ZCaN7EXV?+f;(7p^Hcc%q~IJV1958E!Xx=38E9Qq|Kq zp6>NN2KIIgMv?3XTs7JSZYUR%@xnh(Dj7wTW@l+>$$*joc|P(iMFO+iLP<YPX@^JvQC&A1Af!sW)2)AK=1Td)K|aR_unVps5CghqZwUP z6#Iuh?2UoQYx^>p4K`W5HaPv!c{*>>0R_H04jK;15Qybo!&0myq_g)(`p^d0+fqDQ z6ITc?x)f1D?otGv(4F6a$T{fu6@w8|0olT$P=lbsx;C}@Exgigf6>l97yDLgWs?kv~AOSDAh4zxn`!tnrwMyMJbR`v{D=vRm8gR z1cKV0j%LR(2q*7-W|%#yH-H9_z5ocWPLjm`^Zklo1MZy%$6spQPJys#Y+fd7LHjPd zYSrx0a6H8+k*-KnipW}5&~IcG*X@=I)_Zs^Ro}gx=9qR1|6x&etP3OYmHjgy!kTq` z|5_t;a64p+D)6NrZqS)UL$Odm2NFi*Ca+=F*7tuW37NQ6VRKH zfo*KWcbGJvZYW2&HRLdq8t zwT7Xg(2-{kH8K+)oJ&=GU+W`ulY>RF1ThV~EJ6`@9b9$oug6iAM-lAUZQml;C`{x8 znfMN}imuNM>1e}zZ|Jj$tU2{@sKT*SR9Q3AAne}vIdsGoD8pYt-cVIjGdf2#LA6<8 z=d0z$A$KON0`Cm>Z2u`hW2WTcOSxz^p+US&7)xl`B_j zEsxG|ka+JU4%TiyQwy%?W?2cerLmNhcPTbNe7lRMtx+Aa0B=OC_r27$x!w5|==kBs zfmC%8#yY|v^|1}FudjasELXY|q^1@X>ZYrp4Cr+Ql8oDA3klLnCzKEKl{H*9N+)Xh z1>?Hg_m6wTB$;KGy_t>cUXJ-+X%)l@LjBcer|n+jnXO$afy4wpsfhCBNMyHaLZfJEYo4f2E)cXub^FN*e znLoYmw-$jFav}e3;W=X6`*jdk3k_@Zg)a@`pnQ!3E|3}w#ynB=fQre~`}D#s^rR9| z6wfrhl>5hsD_h@ZW?O;;Vx{~%p)QZ=gXHGw{_%$@4I${Q%@vI*-?jXYiWh+#e(T47 z^ajUU_Mv|w0}@ZN+Zl3*+t&rRFXeGz@Wh{A06--Q1w5vBs7nj(orjzz7FL2H zkpdZWTl>-<2VPdH(d-*7RvB1&ANft5D8syF;W47|m$F@NZp&;DpwWFF84qZfLf|*} z98ReE*xA`vCwk#G>ILwc*GW<#nK2Ac0?y9p%kmFZa#F+MM+7}0vj`bh_IHMr=}r!S z=93@{obWxULR1UaC=D)MJJj0s-y@z0wGUCy@A91EzW`*)i#95 
zT87m;l+(&Z`b7*-&szJ6kz$v8kwABqRO~NbNdk+1uUOdve(OOOZLp@CQ*c>{>pev% z9+q>KngrzpKp-{}nS%pX;N4@yn$sx$Q93j4Ga^u02HvDbn0(gl5n4BwAcy||n=;<+ zR|oYG>jlM(({Kx&5r;dk(?Idp;WqRlaX*RA_<@2I>+923*7Q0jX1L+9J=9!TRsK#Q zLRsmz@kqZfQtTQ4Vs|(Yp2l3Ho^AnCLUx8AFx$=FK46C=-l34 z5y;t0_eXao`QyoTEOZ*$q;U$G|@NL5l!f9_q3 z&IC~!Dt!MeLMY_Y$l#^sI;&YR0I@CyK*O(Zl#W#88A$Syc=Hp-)lg;i!6+{sCK?$X zAdQ5}>-B!{6t)=;y3#lxbI1Fx&8mRb%6*V9c{%#2zszy5 ztNV_G!POKYH5hB++<%ZI9>SaIZ5FB3H1!P0s%H}}tGID_BE39#<#|<%*S>$CJD2qa z(nMs41x!6W599q5p4uDH!eJ34pF{sGza;62Lj_^vAs*DupF@WEq4}UpJgk{eH9B%+ zStV6J1J)98zoQycK(QziO=Jx!LOBatg+L+59>tminp=I%zAqM{1Ab!U6%+uAKZh1$ z0wK-wNDlN-O5x3Y0N<7y<9S}|c!SM&chq5urkhzB5 zzJ1$ce^(4rb27t2n~9vCgJkA+i5oDAVQn7;0sj7>N+^oYoS&8S`Kk<<`-gJH$m#wN z^7)Y9sd9lp^N-z0K9-hlbAy5qG(z<)V3e#%f0X>dft*iv6IC;>Sv1B0Fen9@hM3P} z@U5>14;CV7-gsz`;Gt=seaN?C;oCD#Ga}H+p8^}kBEAYYy{WWnC4~?SO+BQYag!`! z92sbc@bdzT3?dJjO1)Hf455Aj^6OY9E{rXvCO&i{u$j3(SBvl&LjFmQSzYq1kT1+t zjK3Sdz2Kr&pU_)g+chI)R*Jv^gU8RIYjhsAHBta%e-<=7v#S_;1tX$nK*j-KXz=`| z;C^3jKhpa4t$}+V4I~VJie*3I-}zQiMr7#b&6_ty$|Ox3ZX&XF@~-z`mU@t6e>>>Z zj&nk);@c@-lDcb3>$m_H2VQ=ym?zh^w`LvWX|&_Ux-xV{9V94|h5f4m#7h9?C+XJ# zL~7SPk^PA{&>KDR4T7-&*Z@qu-&=k}vQ>TY88-}D*?@OqgpX#?fEd)SK_vkKC%?_s z!T|KAV#=1kEl-ug=)(Sd?R%m`Waz=Qm&NUsV30*VYS(_P8NAZje-mISW}(|0Om&Bt z=UrJ`;@SU0XFWulK2y9&uVVDMfEb84a$3@IAnE<*u3RKx_2Q6KJqQEI#ud)}0I-Q& zg&h@7&o>3WyNL${Q~fDGN^V6QkuhslNwGAd^@{vbXa4A{j&yL&zBh zEmoPbM?q(^-s9&}Uejt_+U*|&Cct%NMif30fp-ohJVQvj7P!M&+Ybi1et=Z$+iSKv zl0L|cr8_{&%VIN2$_=VWfom@;M!OqC)u`|bZJwZ|wzP|AYjYM(eGb+m-nIj}b)1OR#w=LezgPcCN5 z1s(Lm%*;jX)=;zOK*)PJ`t7C7_cJ_{p*&3u^LyXYVIr_ z+z_2spv`D&9Zt(fz_pb<2z%ywDzI|l#0+4=67T`3+@Uvbiy$1=&y$fN+6zz|{VzUb z*B=PPD4|;E_5#IzgWuJCan8yzn*d}#+gPAA=)lXzJ-dtJ{>U~ zk()bxl=UbE6Lf1xNait(_ErZtt7-D>p&Q<9Gg8+0p9>3BFCiB)5hR$82c{ zWvf`2J3p)#xl#snq~j?x`bF~NMCd~`S{0#~Q2a@%=<(#U&+?vz24H^w;c&!OZ=|-q zO#$H_W>uOeT|ph&bHufs)5cHNo~nZcmFd5J2Ubj4KLtl)4C;SE_B}1mRUc@1Aoa2J zlv#Z(=bp__9fGmBLzEZ^zWOO7CWFAt(8;!9!Kp`_E6 zzS=Z2#8jc2tc_Y5*Z+HXcm>l~VqW_CAG17&TnM0-J}ebq+88fA*+qTTgXwMTpaut4*lenwok5Bj3 
zp?Piuh2>lbqjNE^cAu9DR#h7nQ3b}sHwtprm`2jV;qUx)oI^F1#zf?3NUg;q2<)xV zlGu!=?V`D3?ECW*!i5h?E(I`q^l6cdO6y7ckWkjd{PYCK zXdU2#;V|wDJL|RxS2d+7E$&P81?SnLkE3y-4^%Y@(s%Dy3|ldb)WZS(bOK6=Fifg6 z3$4Pww-N>+$K#&XHoht}DyLB`c%(x^{??&R2XOteYrnpjKQ~IZW4DZA0N65;lS=DvnQcTPtgDyh~vQXf=bEOP+6V1 zYb(u`>Si0{CBsr1BlieVDg$F-BWcjXdj}o!IT+_&`4$Ncud?uIFrO9Mz+1>w#O)y_ ziRyeE?yzf{cv(sq)PE?o+(NBbnhZO9U%+Of&*A4z#^uBRAExsS$HG2T!b*c~B8`q9 zxQ>J7WHiTZZZf$kU*XRw3&nGhY=VaHI>_1$E9pEOzNz!zu&WZ)5vZ}$>6ki;AF(68 zr&AUmWL>(NGtBnK6H)r0pk^rs?$8iG(sO+}8s#8FP{pYay&Mj6FFj2(_6EL#5LtYC zDb#UI&%pGuYd=FeQIIeZu_cLPK-ig&HxbgOxK?L-jK3 zIw*zNXjVR+wY_vE7PjdgiF&!3jn82jjImy?2XaWP#$cB(^=rke{y1&?aq47%!b^$| z80Jwi02(l@!gHubFk$gPxfD0a91pmFgZxE1n(lFN=`lfXo65KI9%huX$%W*czwbPj zb-n!3+pfXjNlhvw9@`)en%+`TAdcb*%gDX;9Y#I@J1Dkb zt+!QyH32i;0|a=iamCeVrdNl~vlx_vJJG+VNX{r=5peB9iJRnfw463c_;AV|w6fU~ zpep!HhH=jU)V&_j2gsrJTpN?cV>KiOOM~rKjqT45+gG< zY;(}#E<0+OwZWapU<-z_S`opJR0*#~km`>XFYykS((Qd1yJh>7$k+Sm5TQ)q2~x z)VZg|m&$>0)CU-}HPG=i=h`A<`+kBU#|A;b%lV`zB)c2%NYSC>BCv=B>YsG~rN;gG zK;WuKll9h}Ydg9cJ?`buH*};%&F+X9_+xiFcz2iwg65 zctg6C^T!G#HK80)#a-#*=SADNOU{h>Y?vCaw9*L!ckU+y;3C`Z446)o(K!4d$1=?C zQtn5%3wJocK-_CrcRe;S)i>(E=!BF(7Z^HoGpL3fsk*ffMtVoj>mb*H0Zkbk`uO?2 z#&aU0)3kOi1CB6_$?P{*q7TdH_UNy|eErktKoCo~uV|G5SehS`a??>jYGwjvUNdH^ zuoCjGZH%(OZHa>XzZ+`qX<19k*p0XKBh;9L$8|sQi_D%4XYLtc1Oi$)h#Jb5HRdNE z)?-5rwlsv+e+bF$Z&OxgMzQg%R^we#1s91 z7XL-)K4dgp4`!*9r8}cGBVZ5GUGFbGx{Aubvy)7lj=AOCOn5 zciiCruz(Q~rNWFG`FBfgRE~>Uw;kYEXEFRh6;`Y-Z%2BgenR*a&wj&4-@6t^s(K-K zF0fDeZ;V~~@FKze#RWrAt|sQ{?ks`}`iuzBKsT_Nr zQ+!{aUfmBoBMv|GzQs@NEWGK>O<)Oc4kCiUJS*liyw2%+PNj{E|l;fEa=nz z@_()_9&JSu%^JYl*RqvuF)v?wm^K$Gh%VhG;utuf4ti9j0yHy*fW~5dP-e5ZK6Wne zOmoPVf=F z1P!%}16-J}3F#&yS~4%;)Ismq*U31MkQB(mAZlZoPDP$Q^85Pk_{VK5YgI$(eYK0w zg7us)f9w0QZ9j|mYY37vAnn^ft4{8x78G}9(vBLJ{bkj8O!Y)GlC}S;cqh(7<&^~tn2^B6&$iYiW_-p?;c*v#EYKN^n zsk$Q%n*yY$_p@4gtOo}AFVe`Y`-Epg z7l-?zB(}V3W{zwZqxO>GXwm(`k$6l@pi9REtV+m9ozO3$Fof<1;pl?&gN<8AQqqHz z_Loib_w6PnHX?zP-F!A2xs|A5#WlTC2oQky 
z)fvJU)!lfjB0D}`Vm%)zXe}u>58KD)JNI-+uAJ?5Fa~w~7r-cqeWLq{GH{xcl9#cQ zzZL-XceVC$wgtdq_jte<*`b@Sd(7(|Gi)vd^^T*xGq3k&pBV>IcL|VekbHh*cFls6 zwiJiePMp1-^{^Yc{A5V!(G%yeV+!zJgyN&98uFK+_C1AFh{k1u7kjRnZoTTkMiqK6 z5Q@)p*O7RxObhxAoAsB|rC%pNS2s`e(f*)A%k|cc@1dW9)Lx~!y#%#(YIV^wg3M*| zI!B=V1%?2YiMr&iuI+B_Z^DL;O$Mhh43XU)aqVQ0d-hXYCToXZ%g~9^1rM2hE`Sh$ z^KJ6I2<%HFz;&xfDlpy&JC%yVJuQcpY8k|n_fg`rM`b9}b+O_2TC|c;cZ1l)ISrCX)~ilcWID@H%k@)C-o({+)6X@o8&Y(j2$fNmBVqwhfT z$q5R4mV#x_wA$EiFMzy^t}{*M;Ku%CNuXHSOZjCao==+fFDD9DDzpGJeX{%D<( zxjqZR%Ns2xD^22T6jSww&X~*`0$0z6q*vj!k9c-toWM?cRpT{ZiZ~w@pJ}Quuu&L> zNnSv%dT%h2q&Ga@E+5O9x(WOMQ#;%c^<&3@!cN*Vh-9H>do-JVo|S?G;A;sb(t*z6 zZQswUB~_1mgL@o)T))MzG0Y*%a`oGUeyz8w49MA7KTP*bkx@g|`!)(Lw0OvZ{f!`r zzsTn=!@KrJat49X_zB}7DzPMmOfaaYU!kp)ogDm0R`28u5{s(!9<>ugKr^<7U`}=SD8_{gE1iMqv!NF1u@gXENfp1$Y+?ez7-QJCl!SKa?d@e@i0fS9trLll@g?>(mD@N&Y=1c*iv#9^t_2n>2T zB3lQMwA=Nuiu@e&@ zTJ26aeQUt-ZHAJiO!LaXb;}1sz8iWuIR7sKA%!|}Whu}V1V&ya@|!xq4z|_gNWue< z(LU-+qaO(-wXViW5WiaPQ+5u5G`!{Mb`Wh_)W5x~%zB;715_>ME2nN(_-!p8gt&fL zhFug0t39y8-TaCm|Arjs2Nk+ZH0gu5Vu4P@BM-6fHh@yNqqMa25dS_t4)!bQ?fD~T z*F(fovo0{wYJpEJ2H)T}QUv(qL&JwSpOt<|DrIpLL3|7H$8|7n#!ef!K-Pd1l!BAw zt0B@_heW`e;rJc zg&jPuU_^PaF=k6i6X&A|@C}mN>lgWY6o5>7;iS&AwDu|Z!Yvtah}Dd{zDk}o1oVz1 zU>!8$z~ha=zNxmzGVp%y`1Crso@XJr1{2z)tnGfvtGy=n03dk01J9SppcF(Ydm;jo zL!?H&@BX=2Oo^2xh{NjCGJ}T1{OJOtq7?3%jYTQbXPy%>-2L--AW+L+hqJd^z3%Y$ z?5R*Pn@iULIdgvsRs%HP3=my=$6vc7%P8t%k>cnfpP%mlKCLAXKWIbv1*@aa?h9{+ zQvV%NTRnQapZXou#!!iMyQ)is-1c-{137*mqaAHXCT7nK17CO>!=c-2pR#ZE>&<8 zi6(eAs)nBBB=c+KiyLz#FbP#jsI+X}mn!XF4Jj*kZ)C!Fvb{~!7BTiZIsM6&y|%)s zCyhe;2MY{Nc-@x)Oa@5>JpqdJEi|_hwpz~L!Q}Fk!8aUVuO8<8&sVA_6Y<~*V~SN9 zt-=&6V3We8(7nBHETK{B3GntHXuUZBpa7LY4Pd#7#IyeF(P*?)^KlTE8#>i*JJ1tz zIEtW#ZNHs-^$z{bSRzYVVN2Kv;W_H@b_ljJOJ&$9k~Xo^M%ZTfJRn0hqi&$B5vzy4 z3s@!(I^^B#{NeTz^HM{mM&@;6bG27P%8Yh^i{8VQjJZwUSn7ra>BROtZEy z`i?JxfSw$9Dihv5*h1B^*pyA;^}J@G`o+G3ENQslz60R+Ut(E9z5pES1S?RaA5l&v z;7;u?^)57coFVmXTu#64iWp!7iIBmO#d`=H(O(MG}Lg`_=d!%5Ay=MU(s*k1tO($docT 
zBt?|>1&3fzE*=l$pYB;^XIW~y9ewDL@zb_g5yVzNx7cHRh_I9t5 z)kDjeL!{vfXvLN`fCg&rTSHDDWk%;bK){y{wv@)~wE-ThiHkgrl{RN5rNcuo2pNoO z@nwctu1u?~31`2F8+1mmjLHigllZ0AbLHcRT(5z&lRZp9m-9TToYX+M4L$+=%UJH4 zVdTAMK2e;{in>sruMfYZN&~EKVI$CCw>L+*AH}=bNg6(`+>=CT6kK1eY=u6l0Utrq zswn6~4+2{Ppx}B28?H=)q_u-|fgqLnjcf1JE9e7CJ>p1=LYh`b06i)%>k@y_e#h)# zrn3U4sN%XJJ5&3AbzKH}Ux6NJH))GJx-Fc)x`UF*$5UA{%E=(#3Vsz8QYlz5XV321 zA`??!1Zh-SdSw(r7;8%&-VPFMzE_?mDMl1ir-)PlU$3)tdA#~eCN?mh70+c&(H05J zfx!o_pp0n=dkX8RQen8MW(=lXo~H<3bbwqVApx{|IV*8i9KB?4-(VM(OV)C{j{5S< z8OUNb?`Sq5mqCv@7)Ft?DolCyGr3E z4*p(Ta-$GMl9Sct3EB2+1RsL@M>ZguB0|Qd3h7n)n^?+CApzUixo${-C&3PjcQL88 zi+PnPGv-{othgvKxq-ds+7J+^pWmm&a#LqQD0}D{0{GxBBZ#H*Snh@0n{!3c&gR5-1|6yh<}Z(u%88(G;l(;3%J9dS(Gn%^^)pAW)eG!(yFHWhh2 zyT9QxNp#i^;Dasil_{yDq&lTzWi7D|iLJG0>FWdgRcv>n{`g}IT1}~?9j--H!j#Zk zlc@o><8N@qkELs%^d99i`|a`mM>Z0SvC(tMIaWzH;xios0;^)(nxm`9)`colZ3qDn zRQR-ljS7&ECLe<@zPt(a#fuIOoQ4TpS$5zh>3Fmape?OxU`HUqB-L>6(YDDT*;~fSk$-_2Xs{#rqbn+}clV#2ra+coH-cQP!;$Wk^GT+@c>rDU zEzppZFqOC!0!(ehPH!mM#$2v`^uKtIyiSPVaI4O6=ac;Ra-*N`)JrcC`OAShHw22_ zPCyE9ZKpEHe^>4}V`UAsZXu)-uJuv>#|J@=4-X#g^MZEr=2p z=K(dUfZU}eQjwD3HgxQ|H6uGh2MIZLT0BNM$|H~NQSV|Z$W#6QSQL0tECG;d;Dz@9 zygTt2D4*6NM`0`Z^Wln1i~5n9g*&FUdr8<`Ea3GXe5t&+`{qIg0=>zipm!PSF*o?; zc_N?VxemqA$`X@Yj*;#B6ijHt7Re@{(_2by<%0%-ND$eHBM6bAhUClU?q?6Fvc3fn|@Sl}HLhqG4JluQSuR$tH6=5&Dx@4Og$c;T}b zc7Kwx9cr19DV&Dq0t^S10E99G3@01hO@#7lCB(Iwp7lH&KZ(WjBEM@MD#8*Z35BZA z21#IljcxzRtapRix7OQ8WB4FxCK>1EOP(MOE2R9{?127OJuSa>3x&+4%WV}uX!;St zu$;U=ob>F|Qld{i0Tk#ZgPnK+KdFNkuGJmR2OGPIsr)UxB)T2cVh|F>e3ZmOF=TSa zP`~@TvP3Bh5fYGW!WUI38%OyCp1J}iD2Bbp=1WG$7M?P{s{onW8k1m?r%reH%xVXb zQ5=oLR$D88GomSJ>EBT@4%tkU!xa5aaw^N)(t6LauUK|3nGsCD|60UEV`Bc8ezYMR z<|(zai@Qzp&_jVI>b^CBdd~&moIinFyqZ%}kt}=xqyUYf@G*5k#2k=@BK@z*_dB4s zA$x#wXh7%}_hK7)ybaS&-xsuWOch(pu#Lv=yF@!sG5kk3v}iNCl^cy>_Jlk0(yJvL z|IlOwGTJ5)^4`!@)h^H9u7fxCeH*&q(@h*xoQW~D28p-n*&c1|+}c?rx3BhpocP}> z#A{O`niHd*?=FVi=NOP?Q)vohxEXGV^g6*P2Ox9tx=)c9x&KGu7C&-DmRSSBS3%n2 
z;zq^T@TSkoNWN=h(B7w@`2j*Ssbsead?D8z+p1OD?@^?22zPPF?6-_1KU~NJh4@G3 zfp!+Z!ti)NqDnv-w(ZMTq~C@Z6BhYd!Rh&2|JfM+I29;6)R}^KG{T~xW8Qv#pOTE3 zwui6+lWu)5hY6Grg9^`!?~r{bIwYyX$H7@#M;}fq*2i&80dQoZJWW%e&?dl33l)aW zyE^|GKEP?VLk`p5#qBl|l~~nou)@96y0#rv&{y z54O!cvapReZVoz)NRPK+G#8e{GL9W385p`&ZXa9c%0_=+`GR%PA?wlf@APM)S2(OD zub%6*?DG|2}+(HRs@)&?5Kh0w^C&S)E$z|ZuJ{r2mzsgQw8FPN9B?>K)} z6_Vf^_R83R@&htuxgUf!r8i}4j**AIjy`Z>yUZJRhiJJLE2#4Pzl8o8eGQcXzRE~TG1NQh*p3k0sY+~Cn{0PJjP|FxM*VY(DJ z$Tqm>4|Gy?N&j>*NAC}BYv7C-E~rL8mrSBg(+>7$P@trq^eHty4V(>5zSPrvgPEKh zQ`kwX(A3c6o0SIny7oNAPb1p3r|~Y+p?p87H<}i$NAu;H7+TtZ=Qix+S6B8S^XjaA z65CIqh47pL11$K?wVgT1Gnf$lW>K*JcX)=8?fU~ zi7c9BS4pRtUyAyECHv-TJVbxJ zb|u3impj;1-Jpx$uYWZV0O~!=PggVR){{u5WPSU>TESY(k;J>-G;F(^0^q6Zcq5tM zaBPYb+Zkty|2UTJvtLujOi@?=OA2}jG-Sa;KwUZV0QKJ`$H~*Kl-SLOV-xUOaQaQduOEIw3vAT^g$48I0z{HcQ&;U zC;x($=n71={$9(0gm;cM#IiojJ>~pGyrJ_kWjM`!^3C?@)lVh>h;nuj-)(J9o}<^>+U9=GkubvO`cJ#}s#wR9elzN7m;3C(cZZC--WUTB6h%heG zX`61F^VywQ)x1(Au#_HoBd(RT%+7cj*>`1AfhGZ(0CWoq^b5#Nl&r?t;=I@v;RA7z z?!f%(aohS5jWRHs)d6eY6tnN_5{Vs6&Dft0Uv)ic_n{lV+!1UEsEBm&nG=$(guP)6 z6%w>-;eM`K^++W?2N=eq8?BT%>j(z+DBE6vQJ=geGoT8z^;3%|)=X2U$B&>BWQ*J= z)*Cw0QKN+2iU%+zr99~PKRLP3n2U? 
z>w{P2-2-3sdQ3-f@}!Unu}p=|u+6g(c1ax@PHDCJd;ca#m#M8nKZPY2oA03ayZK2Z zX7?!B-7VtoUZF12BM&+(`^bSDi;#>)kehH;qg6sw{D39?L!SRyHp@NIqt)ckteeJn z_UNlv8Nf;j!5`6NjBDPnb)j1dvQchEnX!*twJVety4R`=y|_ovW05_ z0Ig*nqWtDtrXfep4JJ@P{vt~Ov$W+MO|MxyKf*V1;FLNb|&x zW3tPSGW%HTZ9X~OCF;p4rNoB?6G7W@1%`!j%b^z;YKLB{htR+xE z>#JnfvQRB70G7Tl{r=zkBTFn4bXK6rL6!JrBkoNJvCA_i>rP=**nd|J4tBywT7x+BaWZum`8>lhte1Bs8&0YmQ1 z7y@K20MC3XKTxX1qj4wl$RuY(AZC%pBJ&ulF4G9%lm(0*ip{v#&ANVPSyWLEpfzrs z1t6|sI*FIXP$ESA%E!YiJ2hb`4T+xtT{Ct)N4d)>F)`;HmC2NMS;!FP?p zBN_<>4!6bu79*AW7)q@vMx*PXt~{p3Tf@Z5fvJGv3ssH_hpog~1&1?h9rkOg0BaTW z^b^0QQBPNLGCw*sCQ6!9fW%tS&t*|9@%u4Y4&j%v3FKerO8ZDM-h5+F2iN7eH1#Wc zRQYo)hp04Pe((_b?j(i4cSmIZ#`lacRH*T)kxWM`r)^Ewc-Y{3Sd#6UvYV}bPleF* zo)YVJcqkKYQRu$)&nQ)GN~S-|HXN@wNaldx@+VBz_JG7?y5&DL*#~aL7WMh)GQHfh zvoE*yB`KNKM+$bsIM^53w2%?(>`}*PL?5!4eG8+28dP5FlD?8DJKwwTm7rHCa+JoM z4X#in`{(@Pd_=W(_k4=Avq=P&$VPG#d-%?ut2B3(kAIAd?*K1YdHwuP z;Z)4+YdtPKR^swgbXPGX^wgR!JKxpzP4)br7eG&hCZCpxGc)_3pGSVT0JylCv$}YL z*>qQL)z8{MV-Zjc9(%^GtH|b)_4gV7bJB$Zg|0&w9(ar0+kU@amzzYM+p-kAEz6UX zUn9Huj6_;^(6Maf)1j$dOgaZ)$9)c{x%cpE?$hm^DKTW~kbLKu&d2hMbdqMfn1$3m zSPjf(6bR@psw{0^6UI@H6R08NksqS=s7^f|+HbM<$&(nR=2>xl1s%r+m0Pg57M_R{ z7^;BT18p4l427cUX?oC`*ui$z?HbW>jA|h-6!HVLTR2)ce~4p7St$bG(fV#(MbS=U zWRa5k>X!56Vb&al*SE|!*m?M@9V zdED}r+n<5M;V*FY?^Hw*1851;GR$aYXA%OVq-s%E;z3KdB6SN&3HHkaH z#V=wMo)ZhXu(eqK{#kHw)+_N4#GG&_yQwnbiO3BJm9Uq6eptzf0Sly?Fn4#)$tPlr z@R%S{Aqr{H5_Oy;OREu3!6Vm7x1}Gx!b3D&r!7Q7-;*8y`0|`{S{?1;)Vx8OI;r&+s_bLoMja zZE)~O-IZ0Wp!K=Y#h$H6tF!<*q~KQ}-f<2_b9F|fDy1f~fgd2FH{8HK4R?f>@C~r*PwJTfzToV3= zZDF;K+*2n*BLyc{3}s4*J-(K_U{8)CpyPjb^2g!S@xXq8RM8F8QE!967(B4?Ouw7* zb^%#N@=9#?O`7<48JOC!K2z0e-LCrv z0cBj^Sh^cM4-=Xj{X{oM*^_Yuow8H|E(k_AlX?QfD!y&mk*-^ceb*ZR7!Z)me|oYZ zpCR1#nCaW8nkg{3{XHnS@G4bTiWe$a_lKKVMOg(x{^hry(zijBAg`zzS#+GK59I{F zB?+qay3OiJ9L%d3SlHVZYI-fy`--K3Ly49lMwO@`ed3+>6vi-hJd@*rRZ)9=wW|U5dHBtJBu)iX#`xUQkxLqfjak)(k{TKbc ze@|>~M})x(1XxYOpFvu6JyxVK8%;`xlKpYjkzC4zjP9 zdT+qPZ`kyq_VL_|gpLxaNjK4jC&gY6EXGdWtZ_60Vd)I4$^|2HU(GJMQvMtRr`!+W 
zcoT!Z%Ntg^j=}$$c#kA--p!(JH1gN@*(!D8r_#miRP~)}n)iTgQg6BOVfrj9EL5kh zvjhLdF&gFvOGMuK`AkI)K_Bkq*}@8LWz5K%$UFnAdW1fA0yd7=4J!Q@Oy8FMtKasO^uM zG?XAQ_{&}5N2im>@4Ojy{P#MS6ywAFm%;pHyQd@k39xEEQ|e^OXj}s6H+zd_@0!Dk|_5@d7BE{Jz<04a&AyK0-W^zdIb`_Mt#ulVKuiPC<&%2$)Zm z8)Y%EP6!i|Ovte58n;;7b@MDtM8AVwV%y=QW}JyS)}VR9Ep?`oGioMZUG=u2K}sZB z7I>`xI+)v1-hF(7`YzBWL&vx&t0g{y|D}Ks6=i{N8i$dyZA<$op_7`;eYiwMc;q$p zDzql|iw_0Kbck8yFk!^VN6@ZC$(@$DUV6LGew8Lc_70tcyI`*>yi}e1pWNA}iyv?7 z9Pd2)pdXSz%2Ir8&HQDL&RL>nq7!5C9 zA>B)0K@3liT@?;Bi1|=rl@OUdXcc>AxK(pO*P{}6B9$_tg%)gliz1Uz3s|ZE~ zx_XJ<#gJQ&Pagly8#2pkTg@9hn0(z@s#GD$6Z|E2 zfFEFS%0woe2~5Po#N+GZYa?n|)fhTC*rfk+)8rFmQP2CFZi~C_m=_SS%ZiA12mEIB zi)_5X=9_h7<3H4bv=lViB||J}-i})A<7Sy|2bQ{&38KMT;sKQ#=KkY`ZHL7RFGH{2 z(&yZyoumf`1o%^(4vr@0ZpPL$qJ6SVR2joUz~-%m?qEQJ4*HTS&+Ah)+F_HTDnfm$ z>@I*Eth@L1arUO5BM;N=djsO!;d$V$6p_fM9sVzuw^90FgOaQ31y`myPGEv9Ki(#u z3&;FYIu4j6A|iMjf`pA`y}sUUbR@W2+5Xu3#&}9RCTKjJ}Zkv9|)3R(l5Ktl*M>e#l9@y7} z@1Nx=hHnj?|E&oy34VHqKq!DtKB%GD2(^z+QP?_2CzGN2^Zb_au<75FM>-jaO?l8a zz&ZmI3AN^DI8zxKf3!0)GoDVk)7LHAN_e{SlVEVX8VwMo>wb5OWF&CPwA&C3$g8L! 
zE|lsv8jNm7>(bct2`yop(!wuIpoyt@biItW>fZToD)@B=yf^>z(?@NgjiottZnwB0 zd7V<;LXAK_>e7TSIxarVb`1NMBqe?!dOviZip14Cl$alQnMiJkV#1b=tPi;DUj9{8 zwytwJaybQU=WK1jX5Ev+&b=iU>yhIDQNC+EW@RxiBwSq}pXk-={4+P_nl!z}5;%dz z!iA3%w^3C`>_)Q$^r48~3C7DHi@?6QdFo(c8sUmkTMDqQyOY^^8%>Kha(UvWb#SNW zb1`Di9Vh{yEDemxx2?%;R7bxW{wLUeVF&#V_|9(CTD$jl(n-K)Ro#e!7DI{76d^-a zD?x1)uYGTF9u{P08(yldeg1i!57~iFm^u+0L5D)aoF*!2gRZ?Fo6SojH>UOmX-q$pzvxo>-`&W?DYr*Q0x+ z%Cia)6?$1Dq7>XxI?!MVWLghDlk-U_hx|F>NRq{6Bmx2aPug1!#vsV`*_#C71vSm4 z17CR^PSlLS%n9<=fQg2Q4e{|)o9i}>(ZBn&1zT4-PXA=9dXHzzomaXc6GGGyXY84!M=_c)u%L}^k z&FDB}GL=~-0@GkN0-&3fSxv~VX}O_8EaL6x^>IM5F_C7W!^}hedlz#DgYbQ(`*EB` z$gytIFHp=*ej+#ca9Vx@70(l&s5MLpvn2dA02IOvOF}Rc`R2qC>-Q6upnQJrd#|Tw zMm0xI>g%1R|6ac+aB2*G)~Y%Z@we$1yqrT?g8VKEl9|t{nF>u{F^J?CRCp~*=!v_T zRakmSMJGB53Y%sM~Mw)NoZgGaQ$tReJ*SS35ilz1CXGSn$CbSJDtg)u%PqOx^j=Sfe)i;M1y}AKe zFH>6?LhEkzdwt5N4fwsuA82X5W1pu&;E{icM8AxoUqcSJn)#&b!r5j}4s2Jr=ylDf!(x&!XZD@3cbc7S}KUc+` z8jMqi&T=c@RKx|d7y*{$Gc{okP0#NsDW;r0zAvopQU9)=nBb|1Sc?%>%9=!f&^XSG z{?BO#Ea~95FTGR!fH6Xt`vDAv#IdC=k)!$D0y0xT={|!RU6f9+<1i_9C&VhLyZ0XM zTrqdka14nP6K009!Gv^wGW&c&@yJ{I=Jq3u?g6XE=&f>!IE6&2#pa=@{-YO>H!}o5 z8hO|RLUGD_j>-8L1hXjd16SHmG$m`rBuGtcE>XJ` z={`JmS#BGciV378d62%v6|&dtvC79N_47KTuno3AAnKA%0m!dtYuZ-V^LzQgH8k*@ z8AS6rZUe~aoW$~1TJxfPEjAigF!`ja&g-{)Y>u10Cc7LjD*KMZbAL+ZPJ~zo**Vzd z;eC(eKbJu0Bd85Xc-p6b%S{m3VjC0ucih+|yX(!5-;)o0*$~KuCBDwYhtcHBvf+Ff z2JH;qr8oyW!(pbUKBm3Fd!YyU(%vNwBOC+P&gSZfCsj0DIegP?o0FyM&1;Z$_=Tgr zLF&ngmsy?Hd!m;-QlvD?g+fnLDX$Kw(a6{hq}Pyde+%rSf%oQ;Jg_7J4egF#Wc9?f z{!mzjD}VqwvNhBC&C>sm9W7%);EvSM_p6Z+naHMvfNxJGz|B}6@ZfCd`|sC|(1jiV zo>^|if{!8|Z(5H#Nmxo#EItNbM|h$|)ymXTJF657>_^JH63-|=IU7QT5j z!qcN9&w}+&qK+-FJ}qGD_mM~RN*a=9$u!t2fnnl3N}U4!>YwoiZ4gfu&juWHGfo^V zPTmoGB5c%M=3FxbRgY|`W8HW_fnNpi0K10PvJJp*CBLhEk}<^dzaMoL5Icl@|2S;? 
z(8&jMp(nd7rxnmNj^!(-&pJ&92?-OsB>w@D%4rLV zkMtwxDW>mI5Dx#s%XkCQCBB83oJ7A8M5gT%KFZ(1H{lAoE?MD&C2E0eXX64BbBVi; ztBhE@b{dV|c|BfGVWqCs8m6)e?N>^>D`xXuWFDZ@fME3U9vup!vJEKiz2lrlm}=v7 z=Q|sY?3CB><98!$S-XUDuKB%=P!S${MjWG+)3}IXbC|bt&jNjYv z`0gO;k7l<8EG!5-5-u&d&IeuupL%SYCJoUwvn7?@LJ<;cOcm>CBJk1uyKP7L!EKJ| za(`CRb_8x*75IsUG6xt7|JC%EaiQ^l_TW^oTf3KF>jC%Otv%?H2>%|v*uRmZv}a56 z1+sP|GE4jE@SBMBeC%@Hw<2QJzA178ELI+s2Sr0`s&0E8QW5*vcCr@u5uR2t4*YZE zdlI1QV31djy9}$qO-HZ!*i#VavgY??4YKNzN!EL26RP=q&zUIs+JxFAMZDQZ!?v`a$`Ha;F;^!6dp%us}t*FD?@hicb*Vwz>2e`qS z+CrVjiFO=}nqWo5kqQT2jB1Gd8huFf8K@c_IV&tZ!3Hsv`iK+7)DK3n2^@d`@4d1s zC3R4PnXCf6pH>d3iy_M(kN2U?j{-uoT*-uqCOt!blib53|uvE9}YC1X*$LFl(^AQ$DfgCR|3IRZULLtJtvOK|j>qtHxdHk8#{!aOV#3g&UOt z4p(#FO-hEywj!ZQdGM=mLw4CArB3>P6F{J@LL<4xuD>rUKC#DzmyH4M4F;IwyXFHn z%RhKx=+@ZcHoX9Q0g`1sC8+H5<#=ba&nbh$`}z_$0pQ~M9qN~|Iw6v5ovvs`?6CA5 z4i~5kSYg7OEylQD`NF0WFfNU-IAF&3bvIQ1t+v^bfi4CZw^q$pz21_-S!uqNP1ii> z{<+%rCn{2^)15cIR$|sIyjKW9aUT`p*6d;B^PKp^cdgbdEDkrS?G9I)ynQwn&ew1# z-oZw+1K7!P{D%}mS4>Z*{`H&L@BKP3m<{zW0#%eTG>ZTkV&nh;Y=i)QIjfMl>o)NT!dis zeD_c!@cK!L11oA9vRnHIRANxKT*k1%EHXZ2&B!#QTHNqp>cpukD+b0Y522hM$Gi>m z=0N=c&h6wiJAj{q`)6?)P43jxp%c-ckTqcMljydbv(P^2@#(_FPR_lvhdH{MpO+lN zfX>k^A|oy^}fTVPno9qirz+QRAp_<*wQ=Qtm!9$wJ`pF zCD1U`xz)5#{BT4L1)t9O9)21S3>*Zs0&#lbKBbOo_+&EVOg}l?-poia<-Kw3M?)|= zMvz^2zkA9*7$73%O8CSRDXH0T176!Rh6xD-7JiHyn-9v`GLSYT^Vo2*G*`Me*QefrR@J!cYlL0y? 
zs4`yR*aMeZ^Z373r4t9b(A5OniteL=gn4kl(8=3FyH$)Lol4xVwL8wOg)B#f#?R<)tR?IE6v9-(LC zTmAkz$!DbU+#&yQaKMpnMO3NSzoYgNRKB;Qxl#E4?B@v%Z2@eIkxm*dx31Q;7f!R0 zFD?d@1WcJ3Qtu1>`QyB*Rj+S%pUDh)Ebct@*`;NJd!$$^NF6r}9X5b9q;vrktvP&r z?d4y>hQQEh4@RZMz3q1Q`Goo8XFY!+ocCDyz$@K*E^E6)FTOU_-jPQxp&ii+a88d< z0z5M6lT@KhtO_m8vyK49_FK#SqI?_AR^Tr%ynnpQXzkXkL%kttJx*h_+^aZ-3=6Sf9x8 z%(z5tWxj;b?>>p56SY3ddjVFD4nrh~X?NnFJi&+ZgrXzU0;A)pS(m1Zp~3t_0w*PL zWP_zGf=iP$4vv$#nc--&0K>X+EK!6l1OCV;*3O)_vgM~1EfWzb-D1WHRR(o)rC}gE zL}AJqtXlE zN0?8C=tF`Du>H}4!_qU#^Ic__9TN#RF4?MsuG!6|C^2$$)!5vc*21swj_fLX>SL_b z2_83==@&<@%x(*67o1h{Yk^!oSgI{gK3mji%OOH|@g@5UPBx7=Es- z=-jfBs=f7{L2T=}%baXQrb;okjkwCK2n^+Kw6-(;pBLb7Oy09||Hn2Q2StzrbX=Nd3%kMfu2kzMmBJ%XtEHBk z97IBxlE5cIIHLrKE)Z5WP1>1DqYX3VhD7H7Wm%NGl{(=O@4YuCzX8C@e#3*pDveGm zqIwKSQ7)kG^eAa@oe*fS*KB>gw>=~=Uj^Ru@l)xE*0iidFtqzEiQ>c`wUImtE@4p1 zyae0;DnTN2HM|K!THttm(hL-eU`4HX!CPvtzh%;w)8j6`5)ozwunhB~tFZpi?+>qnuaGi5`a3MiQ878*51lF?n+gSp2zJTM@%x;E#tn`A>+a6F zqEz1lUyBI6~LBBcFY=7SD~ zc5VuRCE&Neq&!`l)@m6BU;q=4IeVq=Jq|2fi>$D!_X;-hYX;7UNU|sZSD3Q!om<5) z%J}|RvE<|4%JN0u`VW-^o>fcmtkz_@z8e`6AzZV%@t_EI;Gc^?Lm@Jmj5?E$#99Qy>7;tk|0cO_$}hmGQl~N z@aYi@BLIisTiF6?`?+q@LVjGVB@cBd6Ic7YZLo#W8F1@Zcygpf7arsGQ!!Z$<=737 zB6^lMp-np=_mAZURwB$m0@*!o8kI+mKxCKjPwfc;bRoLy2^Wokwq6bHQb(wj#yqum zNVJebctT(6hmc-b$}arI-uFeO^%WL#*L8NR;G6BFMnv^@5+VG6@5<5E^m6v~n+^Au zPZaCAr=~pUWozfb6Te4<*JcJl`fr-YuNXU?kAZ7rbGkH>2d*${Y;?TRY}cn3OQ{&; zLC`a`g4dwWp_Ys}10ClPGSSqFdqM6c&D$$@;_13~{(3=1b612=G#|j>kG#%8!7I~M zgijKSdj2Z#XFvRSIv;rWqxe3AK}yX2tA<}~K!8z^^Su->Mp4ZL+8(V+d9 zU1}RCP367f`vYVH7DP*Mx(Sr39a|R>)Vh0;sO@j{^?<&hR=ig7se{T7EC3Gr5=Ynh zhWA?PrIhj}@JO)~8JJwB$f%Ofj3rR|9#)he@Cgd=Up)N_{^O6Ac&b>|PrF}Tb5d_K zIzFYG3+TT=w;qpb`zRf(u@IT$`1bo!6sz{%1-z1+UxVp^ zzrLqrAfk3Xfa%QK&GvG^9x>m0)k~#4kU@vKR|gEHmII@dkR{MOuuFgGJ~rzA?ubLE zItz9d^3DM<-yP69?Y%G0aiw%rSTrBTiiJ1d z2;(s6hrdX#l_vk}9s7nmglicD5S&3c?r=VEcRGTYdUQrxguBh4$ZOYSd#27%r}9l3 z_4&7kyk?(i&#tIxaPi9p5l&QI>(S3Iir`bsWp#oEOA0tjt2W5-{0)!}&GoLjpP;Av 
zd~+s2U2`(4$yE!Z?4?c;Zo#6L``;PxcA%FbsY;oAhobRY(Zt`v31C8BfkG`(1A!ln zmpdR1en2W6wrC1~&@(N6Q~>vm#9?`sG zaQ^tEbDI#4VJRgrlrtV#-^;z!0=y^~f%@gtDF+*@iXm8Kj>GP_>tzfXcz!Sa0^c^9{^63D`pY z#Vru5a8^U;Hn|&;@B%_30>OT@-=->r&5VmMt`743r3o+TtL8p1A?3Nq!?LGICSu)W z0qxa17q-kVkjeSnZwu2K6hHnq`Rpj%G>eHKivz0A+Tf=^4-AaAyn3((GIafbkzq~C zNWcD?<#pyd4N9L9OQ}I0H$|!KYSn)FXp`d3`J;~)hdGtk``T`GIA0uz(IkR!e$oh` zk_OqloZp@mEmK@3ueIt`+6-ZJ4^lG6NbnsHfJb$3Rd;Er7avv{_X`7UtXk0|+i5#> z0qMk)d%V}2I5TAsIRZ(BG$b|%bInI^8UJ@9Ajsx$aWeU_3^cIQ4=(2!5m13BMkf-6 z+W7gSw6ZCV`6DW{=M}a41_U)wM-RfNMPx!XM4+uO$ zE?Fv~pNQHIf0!>S$fpSd9{$0u2$rUecRA)&agNqcqaVeFprc7T*;oW%i6Hq~1tN}3 zFrx;yr4QVD^wolC-u?hc<5FA&vJ9KvCmTMFNh~|k75keba_2kXAdx}!&VW@Ja&4Zx z=mZF^rv=gx!0~*$KJiDU>Tihq=?7K0jU<{3N~u|7jN%P}eGs%m%%+Hn*3{<68+44m zj)ti@m(nzS%Aqp5zP-bg|C&t#-~b2jut)g4(zNqfAaS%R&tDvJm=r1GjALemJz!U*qEh3*3r_u)s7t^*Vg zKx1kRa_K9qmIwv<*0&tFZWXg_LfO$4TEEz;7UHHU&8A_Luzh z9M}%83inh!Lcy^FoXA=>UU*-A{}q`!EgBC!H1&=$vH^EB!f6OPVWVbx|@oS z=B|C38<}CGD(f(Tn ztIr+6VCxak3D-x@(d%?A8gxED5(q@-{7=a~h9Hm6-qA~QxKM*a=Aa=MlwwUJNs7+4 zqbfS^G671am{oR(8B*)J#}dFF)y;i3-&vbR@Nh6-mghMYmsZ1De)sQLZ?r;mk1LEW z<8&_3%Fq-v+H1g?<{EaxKvNDsLQoOSZT1@s$a6EwUj)7ZNp2VV z2;)$Kjj?8lF&vh?y~Q8wmu6vD0F7BmYyJq(|}A-H;)To@Z-g$j0cfb>tM5 z=Y71*%I5IDq4@uZ2P}JKIzn#5*0=}~SaU{U)uvim8~!;#o0w zVpLfUgNz1@-R6umtbz4A7EeGP(Q}A_MLXnf>mNXEc8 zkp?_mz&-2wHUL{YbdkuN(LUxH<9-}pp7dU&`vhF`^l!;dD?V%I9@uK?l z2A}Hl0>cQ%oPSw=K}X)FGlO$$WeRKyEA+dSvg7ybEc%`f)jhpP|9iYiQD=K9YgYXv z(U9-K9N+bljEOBR!cw#DtPAxRqL@2V$=!D|IQ0sX5o^Fqg~4%YA(xaQ&0FG3+3Y!l zY2n)Y&Q~LaVI)on#?6URhweClcQ$q-$z06(ogehQFnSAI10E@vWc`bHp~$#y=W7;w zHS>;nicr_+Q2@$v^B>_**(ItR9UyztCbo+5RaKWisEkaZlEH8wquO@h?Ro$90K25f z&2F*VK18{5NS|Mr9%~7sFqF&DgWHu!4OFxHU07)oYBCHZ$KeJL@3!87_kurJPopj( zQX>iAoDG`1foBrOuO+Wcvub77>!LM?0O`Df%ragf5gJKwJ!d0$} zj@VGO<*aHASB8wNP7&-7-i-IY0-c^yw2+GK^{{FnhLEE<*S6%{;(0WGcTHE6$-#2c zy`0;t`q#au%{l*7e_&Yr1i~+Y6u>(ZXJO)CQ1qnBa?_r2!v*e&Ym1AzeR)`8U(W3P zK+_4FL;Cg-4oe}m$ADntuTN<{t^_St1e-nQzzU1C{`v8F|2dHI!Nqnti)@9mUJmwl@Kp0e3DE%!ooWnf{&ei32n~o6 
zO+Fr696h_3gn(ZKjBpvzYdvSesGf$!tTM(LlcFNge{DerI9L7lw#ZM?8r(6T*<3WX z1bEy}+%`M8-T+VlQSj4Xp?%^-$1WVL)2)c!#S`^T5_2VJv<0OglQ*%uKb#-q(TqW0 zf%0#4(;(R_OMKk~0oNbBSNxi8oljH`J=vF~X&LRGtAq-lzOO2u?EIYss7(-h{d8FGVzANlpQ zZZ!wF4Eqo+MKbC6-V1|vP_f&Cou2!9qSqp%WaTyTm7Kf4;D7yrp-2$3I3AqwFXCdQQ8qBR2eJMi~8XooD%Km)|CVd~fFt`e3YJG$ab|0ty+77G8A z?=5w2?R6zqb^6CO4S4%xvO2O!+O)EC4p}9{Ym4+7XndgRUb!*20sGrAdja_25~;m< zluUw+D^y7XbAY+{;jZnBibuN;x$1al%JG5o%x9Jvjj0G4uV`efr)IDm!!#$Rt_xrtW@%6!t7$I?_`^q|@*H+kb!m(j~fS z9qj(=i+b;pqqOQPs`k4;0ua4(J~jOPn{OpuK?6Fq`~|W=C;5R2t6V6nWa{=AXb|sG zmtnkQf!k=5Mp&nU+M=Q7Q{-S8kh6qs0=s);TbH63x4`ZIiLzdI0|Pi-&-k;8(*+me zQ_vgiTpwM5#!S6uD^g6VUzA#TKwv+azC-x$Z>=>!r=fulm`?TQRX+*&%?f2B%Y!=j zq-<9EqU1WwD8cTtf*RsDC&a=M?^9p*qI|G;i6@px#8zWQjVO#G#|GvdFQ;f_YfxqR z_XHy2?uPMRO+-rAypQ>g?q+odfhA|K;zQQuYeT{EQ2l(hL{Gf%nkoj9Puv)$m(Z?# zhc{Pk_RD4jXbf&b+&}V#OI!GP5QdPuTV997BFYd4hd>> z$%C|%!-IQw-gdLgPSvpuu1TG_)q`ysb!Bld!xE!ffMaOlSFgUtJt33}C@a4=zV`Mx z4zDC08dqA*)fkE@vbXzH8EI$Kl`@;~RNd)-A8mm5tVxVEa+hNHZVKSR>1>RiU#Qgh ztu%wJ1;$f=vnVG{y};aqPvZh@C~y#c+ZsFc{P!sz{cyYJF(45i_&EaY^{!_*DkVl? z4k#U)pW9l^q)H$td|&J$9fIXxt$&>+k&s+)rE_^Hn#Ef6fTpG;daWilGO@0U;_t2~ zcYY*UAbC%KxMlca9~1g6zBww}cztb+s*`nuGu0p^BDPU5G64eyn#XD)8RKj|2v6z! 
z{xN#~UZH|z`Y80LcOv_I^huQcAkS!9*?L&goERNu$CI6*`Lq-Bv6G3)e(+ZR-jHZ& z?Uj*UA)dZR&%Ou@hug8{U;99^pArC=?me4~V%ojF3xOl^^fD7r)O%jJu)W~3>R7fs zc%HvBEA0NGRK-JDlCZ9OAZ=W3X+_f~9v0+E#8i-u087AkwgSJZMV<@|c3N}(tRS6S z$uD4${10j&ngF(WZKIhUY`tO;?=sII)Wn^!8y(iJ-H;xm>)hOI#*nnWie2zfEOCR^qpZ^3dy zTa^GGkLEm$`(9nSS)E_c=%Q(F=SMK^Hmb3|3#I}GK?dro1gR+sMg+aTWE0Q2R!`7f zKh0J<$IJHM-P>=fnYa6732}+!DRCT}3B=2CIhqkyJH#?{MjCHQvUjFj^O)G0_St!< z_*mG6;>wi~;rxDdz@TpppQK2%?O9jNk2ERI*?`mo9A0~_^y>=j;4-mlhwMeX=r9CG zcq>*3@xz<%-Ef+E8G5Ao{1ezXmT;NGq=ik1jUSBLWtTQ#Ezw{y^?vdJEjU4JS%JMJ zIxr;$MlUiAw~bEeRL+m-u3FDO*c$7nyyFPW-scJqJSoVC26G^qCxx{~DGcax3KT-b zEn^r04@?Z{lC)LUn6^4`R0FM@a?}LhK+w7=om^%SomlM=ual|zik<#C(|g1MGYu0` zdNDR?uP;g+Pz{64ZN9W0Az~xDnKP~f6OJ36yz^r6M%sQT(xjpEJz9Y^B=TCuku#c2 zjYyj|zYDp>dK(v2$*J7rkec4Ud)T~v?q2lw=eH5@r+*p6pU!O(&5deKoFXcc9EPgT|DL>kBE%-i&Hw`F z?mob9v{tg%SL3_?N_D3$uG?!%&E1|&bY#`!5VSW9SuYGb;3IspWt2?xmofP&ZnA*) zyE%q=eP>rx-)~rihwH28K2P>?^#Aq5Eo}{G_4ll|J#?~!Iv@whhms|H$j@}DCeNbM zzjEt3*Lg`CP;CC;QM!VYatO13Rk@K;!@0F}`1(@Sq6TewUt0!`Te1ju$^)KA!IZ6o zqO)=cRM<6-aT>54>;PlQn$0p8?vC!z6R>e>z!G(;(-&E@1LQWJ^v5qYRF<;+NXtbJ zQIrwEYhN2tF2EU0WZ@K6*V#lL+G`#ZVeR*8Gx zSQv{Av!?^iJ zqO?og_p)t3?^p*g*{n~fx2!6=PcDVVH$E{UR5$+u*6uub=EBavE|l-qN$fK23&ujn zYbZ4>XW9OaaMvk*78|!+`-7;0*AIju7U>`xL38xz;swT+#>&WWj`1O>u$Q(L?|XCv z-V}`8>T_&R>eZ=}>2j;rHaual;^(g-B12xW7LFz;^2@QNcAZ^(Bs_iUAVBS#^{wXc zAlJcaZQk?H|41ru_jiSFxnX?;DU+mLUIpN&h;$0SxH5EN1Ea%t4-J7QS~IATGL1=c z8iDmxZ_mA?{_s*>wvKvp!~4GMZ6!;g71+Z)z9c1gS6%}>`(PpGtlQG3&c9vMQKNgy zjlS0~%e_gHWOD!&_28z~GRx`Ft{}KK(zVS#pub-^_1hU}qc$a|)|PD2bDHPSmle8` zXH;MDoY&G^Q=`N(kpEHCq06Ye%mGi&I)DH_f5534=|raowEhzFZ?&afzyz$}CAc-| zX|Y9yalsH}RdFV~7zGI*ivZ~l5QCNg+5_**h9OX5+PIuMVIROi4M z%kbqd<-t{Q9G7F#+1}IT za(?jYHalaL6jJb^-%?})57T`c4`?f}2$b#>dTIZx<>8(8030f>TxjZx3l_kfiV~3G zDYMEE#JiDxtqB$GvJX$bLKD>TvS~w0L$8}7Czj|&klv56_tfuLA`eAoc?g-J$eHxp?XzD67iFW43- z`vjK0CvhG;sCStoScj>KX{95wOTz3qlO1%sk5}4Rl^Yeh@DyJVDs6$>9xo8|6I6~m zpWfY5z>5eV6uYfk4k$DmxuIX;+!J3BhNZNa@%~{wl+@F3;Gsn{jtNyiri?1~z?Ul! 
z?YRT?b!>}b@rZ)2zV*m4x2KRhs~VJgaoBa;*UIHDD7jwg=20IKeGfDcIyoM%>t=tn3Po5{W9vKmcUmzi;n-QQ z#iYalBX#PxqVagN!QK2OQ-5GTcTTP)aOqu zp}Ns$9*H+r=I>eVybVb>T?_#UOLc$@6MRQVo^ACpoqe&${bGyhcM0Zz8~*tQ>Ad!H zDoX!iVz$}h!-Sg4sat-j!xuoOFM+8Chj~!HWp?x%8>B1T;QHn8#9BTAsnUptr_Dfe zP9aVi@Qc>)#sOzEzs{9EeMluv&&Bpd*oTzfxX-uIx;^Z^`Lg=E7YEkq2tzG?Ifo2t z(pm0jr{!gT4(<)TC%i&gSnLndF%^UzO5>v?F2l7#ogdGBYI8ANHhdgalH_Bl%sK{( zh;vifM;7XF-OQ>-%dnT){GS(KUVq2D@vY5L+8eid8xNbMPx`#BUv#b~QTp%_O&iFA z(5bw-xF?r?(EXAWOOEG>v4cI~{uxr)QbC8gQqo5|3-$NbQA`S3f88q@o@?n&qtnis zHY+o2)bn~JzhH>6Ub}}?&C@Ss(Vx&?1cT11I;i_96db)x)Z`@KDj30mPdTCC6czIC z$L>&WhxIO6XD?;zWxl)=>Sl|=+GFV@>gRlN81^BQ_Utt|Yfo%PaI;oeM+n2^8{WeJ z*Y^B}mosH9UnH6N%kjgoFGYI!T~92DBR6JhOH(Iq?G*s_%fG39nP%B!6z z4#UiJUb0Ea@fFa*JnJFFfwQn!hSXS`&^+!2uf?#`k@@5C>bJ>Q`6;s1eemwTj5%QY z#9G-`t0kF1a)0=T7_ERgFXReqTLwbKsGdX#?}3Xl{=5H2)muhI*?#Ze(nAj=HNe2o zC?Va=45%~`(kUQFD&5^(f|8O_iqa|FD5Zq7ARPiC_&>+{_kGrSUf#aA*BY+t+UGv^ zK90|Uw6vVyHK>)Fsea6HzTp2cfXOPOsfQ~2_#hTBwP1|jgx8fZEl|u)qvnXxpAmXR zNF<^yjlBwyw?XamXFeumgZdW+7RNP$n74WYtIpSUcbO>zLEVuEAmN!w0httoU^`Y5 zVb@y4hJj_pG@^xmu#|&rP3{}X#!ci2t1s^#v#w|O;a{$g)KjD8L#Q^YD8^FBFF%hX zP$UY!6vT8<0&8DFpzfyObP0b@qQ5r@*c`^@idlmX6?*-FxU%wHOdrf0aD}&xz5)=a zF3#6R(vO=ydg0(drq23>GewWO3Hc}!PL0&{#p=z>-(pJg1?(1~L)`SlR@cGtH}x#h zWEskjy-JLMfb|F!a-Q%L%vPE3_+OnKh4+yYgd|66MJuqIDg=iT^WeT8Uj7VXdv!0r zs-t%DNm{zxx2s-^9rEk9#kqvRs_?#bBjk9K5h*|T(-f@v?Li5 zjf8dmKb-W5V&ai9rG-Y4VqdeDtNHVLcd(YSGP&C7}S<+jh?Y`;a6MEL-Z`dI=Vvkhddy zI!j`B9c0}`ys7k$!Vs!cXjQ)O?om<;E^Y8^4*w@jj^>E(L9mtiULDv|a-LUxJX7Z( zuJ61iS=d}TgOW{`%63mbbMY;R!)?V9^zUX)w;%p_Yuu#W6RkC004<)xi&IY{=&DY` z=L)xc`2rpCQ(^F;w*$V*PY1r+LrkZ*Nuc!fXoW}SvA`N|)Ur}5^*(oN-c%4S_0)7+ zs?1ay7ENp7myC@`;ya(dbou#% zL|`tj6X;7%#K$?rnsFmWwibO)lXz#1Q|*}&&atWnp{-zfHl?YoAB35U?;YmH#=6j5 zkw|TEW%`J0uKii;?p%i+$?!^5fN;+^le%g~yzk85k|Od~-qHt;Z#DJSSIHif$qr5K z1?$6aWBQF#?mJ|Z@d(HMeMa(Dt2N;Z=Og5aA)oPk6u>c7oW#C7h;wO`o0ZfNv;Bkp z&di|Bt;FwiCGs9f_F9gt>wx)Td>nPnSP5F~?gQLVRSltoi4WuzFadPCYsb<4e8eJ&%-2{c()S^xm1=~k+1Jvih 
zr@*r^@Mf^wRTBzyrVsIvy7C3O;+FY}ck|%2h5(@jLcU+Ko#q8F<*mnB)QF03V$@=8g)|kDQKv1eZA~Cka-; z=>(I9Xhp1a|4BIEoVY4prkNbhynEk9$-SNaS*sR0!d~Av;8GwA;Yh$uJVTlL$ob)l zX;=1T5GHSuzxvUdwvb%bs?>M!qc^Q`78<p?BDXi19v^X%*=f zpnA+(E;nDMyX?-iwQRDvd$~wgUjsM#w$y6g_$BZ7gR(E>OIx?PZ@bZ?vq$!jcDs(u z)w%79Z~gFr#XFRn%Zvx-I+`7uiszCWlgIrmY*h4)#N=xvR$gWnrzB`FSBu%be<{l9 z?l5)3P=D3QD}JY+o9taEsntiD1NQmXepSD{_x7jjZO?ahb7g)kztt{|c=Q2)3+i-A zj3&R_nH0hkhNtL<+d?KoRM6))=E-1>H;FvEPd!5o z*+!|~>-G_+wr5`Wlaaj>B4f^aBt7NX=!$E$&-8~2)u(~#4_hdURO;y%yc)V1Y3+W& zOt5&qEm+XU+saoYfVcy-;VR}}w?k(`i}8OEXL*X+uXMkmbf&!IGB=6|0?`0_nx zMh{tq%8yI%rWDXjAYoiWkiPRslJvfKI<8vkGW>Sb?cV%X`y_*Yn&BC{}naTKqoK9!S`vScZ5QDpZwi(z|bmlNKl|PRo2Tg zMYKE!f*krK6_IH7UX5X2oUQ$)QDPcrT6qsEQ&*R`w6@pmXs<2UdQmI|VPApFf3)aP zX*y{C5{3x3)P$#erm7d5Bkmp%PvSJ+(6)B@%S1%~#+r&GNN#{z+sRMFIhR|wTzhW@ zH%j3dGxpyk2S`0I&}dC(RB=J0lfuhX@!9OugFKWQP_mtQ?k7v?Sz=lwEt9(>%*p43fIxK%&#ej9xD;7bFr2Xrln3@4x?5-Sf(@(iC z52#XP*RRi~^CQ3Dmnht3Be%w>(9wMuNrI!FPJR^0n;?<&o<~&=J|fB))>Gj2$)L-t zYeEeCQA-@Axz&8+JuK!9$8ckxT9-`;`ZVIIP=(ENH8?s}#F&f5^>-b#m68O3J)Xl? 
zJi6ka;5t-uz~V1ApJ`Y`eg@p=g=(@jUrEzhm1wXnjr0T;VffqUfYQG07oAe)lFZ`v z%h0a;w|HA2M{B=vtarG0xCawLCGOC3nrg8Ns=SRl+kQ{y?}F0?(fFl*D7YCm=gt^Y zB(I=z8Y={arkd)pHF_mg|-H|-}+=Gpp9O}LBV3po5xh?5E(!c#oEh#+} z$(zCwiNUJmWqsbhwsKW+lKGkCp0L`NWV&7r^lPA2{>R4VYKEcSX;|c~!!o$2nl0&V zN^CxkLToO}!K7FExIi=ZWmgHSw#F1-oJlcvNz;D)Au8_WUJ(O&+tK+eplc{|9~XZ0 z5(Dd1W(*b24?$~!ez{keS8@N{jS=8({N2FjXKINXW8v!v>auW#t%b>se-WRKF@s>QEMsLrzvneppj-XV>5E`+FE>(?ovFb$cc}f zrG}93&`~V%QK%EAJ?6AxE>qMJig+kaAapL{Llv(J?rKCf>UuGLNxskanmwluj{1dP zBZ#}pfIP$PVJ5G3%cHvwv6*)JB<09>VuF%wgZ#3EAR;hU=u^(G_*@z_lYt}ycey26 zaP1928Ue9#AZP}QQOPW{&zVXXRVZ0mKEeA$SRLW%_002P1EmB0jS<{wE*>!g`LLmp zp#H2I^B^llA8Z3dYkZ5(ii})nD64e+QnkHBL!Hu|d?qpn1?2)dW1wTVkmv6}KgMT;W zPrT?u-G29wv+%1N#%i}5{~i2!7O8Yt!pu$Nt(XbGPT-RN>E?K(UsGQP5;H2IS4kSv;!i|d3#g7lyj8Gmw!Fzw0Nh|k@Jdk3DaWe z+D4=%D3Y+o`P+xL60F7;w0yi$6iY0^e~7CZ?NAyJncs{gp``WHdZhjvOM~{RYhOp| z&VGs}sl<=no_Z8kxhH*ELu7QETeZ-n`HO>TK-;+u5!3#i`Z@k=TmO*fF)OQDdbeKq z>jH-`GJQ^CLvlBq$S+|}?&z1*Q;z8T$l@LUD)cbm_s?&di)wGS!i}ZtG^yCA=rg3v z)HY=0)i^RzwQ+_x<%gEbjYk(E5II(TBiU2!CX&m>Ap}j}lCV<-#A}ke+YC;lkp6*0 zNK;DPM>u_{#_Ro`6Cmk{7la$t@7uZfgj-!lOVQ0>fvXfhc6TJYdUw-xCu=G`o1{R% z5^1U%*N6-46(dsRE7=US>-Vr19KyE24vk4^|(`{J$%N*-DC~1fikLbqX1;1 zdW}mgL~VD0-;Yu6u%!^}C-ut33X-$%DulkPM=-Nu0v;GL?Y5zoB$a{;@fxyf&fw60 zl~48}S4u;{wA`R9=7va*C#f_)jYpU3$@OM7r+#fA<$(VkGEx6E{p0Hh)q>`9q4Ad- z`lf^FNsq}Xj_O9$Vj==NLr2VC%+!KR1+J^;sOx-GqQwr zSq>_!ohJn9U_kZDWz!=ZQ7*M`1oGB@%ULc0$FIJJtYU#6m|{pU%=Mxt#CUK}akFq~ z*9aS81!J1-tVD$|r`-QUnSjK_w7ES5kCRh?89!l+;G)NH>84@zh_Jp9x3Rh`YrBd4TaBb&M?;*ARUAf32a)Pd*6K;7AA0;Yr-=c1 zt6(tcInAe=cuPU3>AKmvjIpA{5#R|{P;}|F$h<;gXeU?Xj+BG499PwGbMyH5)8*<2 z$&)*H4)<%0p`L3XXSb|i1vqF4Q8t6ZaO|?*54hu&X3bt(bg{gm{}Nx*aTJm3-AAkN z!&8iboi==37C>sWYXbbm&vV`1C$2b924pt}q&&UYA~k%!XE)y@`%^FDK6^&;YBXD@ z%A+RVQU{kN1uC3-cMKjzH-vN{W~O%Z@gY^8C_<)fh~3qph1?;4JfV(V{!%jdaO=B? 
zl4`qo`e0d^`4h6Uy}Tvgas6ygPPwBS9dpd8RqB6sJgC_IX)KQiDT zKDcv2c`^rdUfVCXpr^;{evxqH!g9fkf__ntioiDe3MMoE9QtZ zA}({6fRYV(oZrD)y#HURg~PPs8l|+1Z-)|gC;PF8=ec+cskn_N4=^FJSa8Q6wpemM z*u(Jp#MX_}9iR!z0atHVL&yin>ehLWZ^x@vHY%xi5AL5qFz12mQZNuQrX+GR_i=2v<_49kY)ax=CaWCc>HbH8mVkq zMT(~LO~UFc-@|8NA|TM>vYmx5m^?TsH>s9WcJO-Ol7anZHfTg?^b*YXF7^d;%LNLP zU8FoN^8d76A}@UpR190Gx?LY8mVtDKKpL5cUcgv9xySE2Cvr%Ki#!YOA>oc%;Q)R2|PxrK|52I6LiO)&`OHrS{ z!#F_tM$@yv0XiIekLN`y$6r!4=vK1Eo`6qst(dPp34g8MaeoVlp)NI7=b49YbtelC zLi&>);yt$nb#$NrLbF2iCOFdN6IN{9Rt^FtAca+KOZTQ)W&Iu)Nf)RcVG>!^tm1}G zpq*ge4O4>O5mQI}CSSny3WI?oQ{1$BQjf}Z4V{QsnNnvPPiGW_Jq=OH8P3izEpusv zhbfMm#Ncc?f(n?`6R&G&CR|!u@%}>MWwQ zC=9vrusKXFN+wFNB2in@0wk5ygM@WGWAggF3a!r^NF}$s`V10nmI|lzg&CUffC+{3 zKCV>ptxK4|{($&>jN8x1Xn%LVJ6S<8siuN$_w;*3VU_JX>eY~imvM^}*H@+75=y;J z3Kiq_Q><0Y)2E(b=1|}@(zSl=b|L0=&tXh*)R%fUVuzQxtXDhhndVY>eMxBs;fAo) z_5wHwfG~+Qe#jKAd`y)}L7w>cLn?N;tDZ{psDxK2p33gif#PPO;vo$crfaXGaKDe&`7)aMxc(xa`Qat~9dI~t3t`^TpGuObbpBPH(R{SZRliL=bQ@5_^4R1)aem=b81 z?`QI!m6#Ur>}=G_eiB zBJkzS@B7Xjdm^K*+j=7PmrcOVF8q34Pv4}3IWPIqBW256uK=09f85z%T!*(NNY-e@mDH`m_8_iNMi{v%?~Y0;!}!| zX-WDH#FDzaP3tL1(j+vUG-gS@oEZ4=&zoR*WKsaVI@&;zS-LvT%uF2IlGY(c=TsIXN2L zN>dZ(_HFjJM0ERojTY9Qkrf`kmAY0+isY$%h_@~mF6YzLc91ZFU~{qrZ}MaN27AhQ zDnk7X7Vp^MY@6}a5N`s|QDCacX^)`v-ePdmeQvQ!TPVJmYxb%vzqX+9zHviVoKR_=cGC|3c1=3%iE;P#bu$Tm6$q?5?HLpj|J8PC`0t`*J z5sv7CL>0Nrl}ii6s7mzujA=L7YN1+sh1;YuHQfpgH0S7VK^z2$WK`h%%|nOJMeyl+ z6fQRNj2GVktrw6&mN!q(zN&T;DZZ`NuPI9D_ir_vE&cdvWl3YR7A%^$R({0nB{ zf~8liLwIz(#s;IM*iRGyo4j@wvqwe+-=LKvKXtV@{3I9DkdS_`=2DL(A*SJ%o30n) zm-i=Ffw($_S(Pm_N(Qo$WnJ{6X#e&@h}UFuQ(!Lo@4LOKTXT4HD<5vDrwu7r-0zaY zDe+t3)@FUCC0@CEiLqfzbWwmFpA#Xz)8 zS;xS4$?xY~HkhI0XSEIa0P`s2L)$%z4w99qoW;s17?HMXc?K{gSMeQkPGlc zMle&6mt!@JafN0;MSx>9{qupiba(Rk4v47GArFVe-^8d449F=|6k?d6?rahd)B|tk zAV?(GMZlOJ3gn_C08ZcD<9omO4F2K5)OSyq_#yj)Z0}dC!7$xn)`uaEVGRYFz$^Fx zusr^%iUt-9zZlL9l3)}rkewe8b90JGS$Qvi85+|LFk{;J<{0AT4tjYSmqZbl!+af8 z*kz9W_r(h9{Xnyu_TaIZxOJ^%NfIOa1KXX?iLT--i!eUMTo_|kfz5nu~`Sw8f&ROK3L4i3Nmq~ur7ukY3 
z(43*TsjJz&$;sI?FvXQ9SY>-H6_OsNmCM<$6C1hZxc~G-lE5Af9G>|DNewTy#L=4Xn1s2R$ z0k67`tJ5mj%|X`JBM9tHe@r@1zO(SK*b3|Rs?@c+O^})@ zFrs?C4?!N&70xKM^}Xxz#i7N+S5D|N&=&fg$mEV=p*6xF#%5e%w@G?Ts(l!yHqG$$ zBkARYVwJ^6vEEy{KN^*f^U?bBc!`Xm-h^X`_ z>p_|*4vy7s+DfrL+y)PqimG%tosW+#;dUXjFisc(SG?E?1ZwIgXEAh>Ark7~c8C+I z^DQ>|ZTeX(o*I^jTASY|Xc@}jqc6fMS^b?(Ak?YeXPR3tUhyOnHt;_!K+04n)&^IY z6)*NR)K~d{;bFftod}e|qOX(sb1Kf0Ohy`3vXGemnuWnGpGauM-HOR0)|)*} z1RZ-pOmNYa)0gNf;;N#N&m*ZY!NA|D?#C&P48n~sNM0n+wIh8fU_rYAbFWntdzQYN z8WtIDE%>LVL|G~-!R5?P0{=hl01~You*%t_eXHi_U;t@)DdCiJ=W(-cyj`58i11T4 zxW%CA#fac|StR8%;fmDU1-R`z%4$G|rrRkC9;c}mO#Mkgy`PIi7O~{}BFsvAiuV#= zN`$1SkMXfaxW0LPgzIaHDp@7;$@QgpW9czmnsD7(r8~gtEonag!Tc9suab0ZehMZz zL8Dt=x0dosWnNZ+nV7>slV8f3%+&*#>vak?4pq|wI_%ke`Oq)7ET2x%Q*?J}$`wAc zFma7wQnCV)pwkw+T<a{f&d4WtLirlX*FVTJ#IT zVk?mqGnhOfx`|M29&@AE>t@PLEjmJ5$V}j^NS^Eq46a-QU2@hJ1C;SM7vGfCj1AGiEM6Ky(>=L8|z4FRq9?_6L$$@^9g8? z>@L?hQgX>by7~$}E$qKe&1@i`lID`n66NadmZG~$pz{j?t7-q0(|UY7+>XRVQvd1$ z9t|#15T=(ATckYv&gS~RX7Vc;f49H`L>k#jR5w`kS zc55u_hiSRyir9m4VsQvsxD~U++#O4r$6`^^cj0o3Z9p(}NrdHQRPZ!TRDq5IC#C}o z$V#*c6`l%2$^{ez5P2fr$DKmyMV_XiC-8qCuKBadQm%}Y-Tn5_k%K7_#)XI=ojnT9 z_aIL11(vspnC7@+l>Cm?V9tRLv22_Y!7TLli>}KpyK<-@&^q;DOpoL=IF=yK_jA^U zh4*oK$%Zlk!e{sd3-^h#E6QlLGdlMvO%P4B`RyGQ^?Kkxc+vqf2Nl^K*wYO5Amb(o zrjt7CHv|bimv{d}$TdPyi7u>^bb3WPBDaThl+^DSE30#As?%jiYs75cWu_v~o3Z3y zao#Qg3H4Fp9V(+aZiYW+w5Vt)h{KA$QP;k11q=tBjXfKp?8+KoWc9qY!fKhI6i&J# z7I^bV*TLsw`${8lVFCK@&P8Zv=tcaSh|~luPxgY()ya}HTaRqLQ;W7utN8xA60`g} z`o2uvBWIrFj{)%!8CDQv=~Lqdw~`1CvGF2$+-`n;OfAkj*XY}%_F0Un9(;;pqD0TR zF?~{t6|3^S!d&7+?{Q6K^&g)xcM5r&{dl;3`M1@B_W^gt!Ena`r@uE7TIa>wnh<5T z1eYSfOekb6aNZ_MFwQhi;Uw}s-7R;xe~hJY*u?tBFQVZVIeP!9eJ4*!dx0JsF~*a7 z4?N3>Z^sqgTKe}m$`shl+Jl}+g3qT(2!HTI-~42RtQ&W74zLG>9|YcPP#bnQ=dA`| z^$PPVc&k1+ix*v@aSJE1#~nDwy%YzcL6g|rNolt)oZ_Xb6vD0VLaaK2zTpuEbIQY| zyXy?vQ?#Y(iYf2k$$nFq9P_!19x*wEfVnnnyVCU1odgJpR}}@jcwAxX#)eT_W!^EU zh3oAQ^1GLd7(C0OzI-L>iD#ugL_U)P=z2U|1LrP5Yxx$8iEsL(-M&v0b>=vNt@?Ds 
zd)v20IX-MT3Wr7bvi~{|8q9fRQ1W(JXuhA%)|k~WdqP#5SNaB&CW8xL2L$~iY4M#9 z3tWs?OTMKe$YI@)KM~&vdIM9CvlP!X#H^so40}t9DNcp~_@H-vW7Xxm1zt-5!Iv9Q zZAQXw6A4nrCIxXK2arGMhO{AaX&GS(soWv03DuJtL_%>)xp%iWDs4!54?|s8jiJ4` z@xCPnnC=|dbPhu!bncL?(Atf|1BwX57GmUOM+tS#k4?`R(Y~OwQF^s_vK<(zEU{5T zCU$Kbm>(%BMt^T%B3Q|%$hyVSn$WUQv(f1&x5!oLv1F_+$Pu7MZiThEJ-1!64HU%= zi4Q`+KJIcFD>NW&z2VZ_2S>CH-Q4kp%N(?s{-z}wLXR_qrdkB3YBawdXp&!5fhr~ zj8^e8ieF&($ZgSuJGt@f6MZZ}+CyD+9*MN+^Klub4hVEV{CDOH?0aMMwsFk~y4W;% z1ws-0Ge7Vgl0c?oF~LC}qz{Gos0Z#rHho}-$?X8(*%I(ADjc20#JDfa#*w~Hr;4K! zUDjbVLAqYg-Q|7GhZf{H1r3~*>Xk*R%xcn+*H`P@;8Y}c9`Vb~2+8rj?xz)epVbM9 zfBDMZn2Rz))NyzriSCxlWw*E1^TPHLO~RdSV321|caSu`6lGRHqh0`?S-rwVzP9-I zZgKUnJ8j3a#6?4OWG7hy9_c|QP&J`8Sf0A)cDDH97@8Iuvr#B$uw(*$coLPB|Q@?CG;2;Yc7cbf0nS-&!VcJfiSzeNbSY^lox2>vxq!HzJc0ED7kR#$pA%wjUNqot zJqjI&hRDGd!4aeI6(cQtX(5Q_d6=GJMv2`34xu3R(nD8DUCNv2vpffAY49-hK!ZvB z^^n+O36BAf{ZN;6$)%>C6aL7;FIIQEer%)w+oO_V9X%>g0cblIG5v|29C2Q4t` z#DQLp_^x`T7lD8NA%QF&DdVn=SquL&2$G;4>a*o^+O=`W))4VAifFmyZydu;i6#|O z5L`ty`Z&C`(Ye*Y7cP(A@a~^x``t|rx2#i+TAvDAh6APSLx;37L<#vYYVglwo+5oD z4hfq=s1|oi1Qs3+(-hC?aOycXJ%P(d(I06R>op3S=S+Jm4rg|{)q1Z*&oHL zpMF+xe^lRRW0QyV5nMCNDF}O6Nf28T&ka_CR&sZT4SOV>@33;{y{!;07!-w#j_##% z^+o(+;cOH2_UUwdPG7~Lv_W;$rzQ!H; z1TVI)n#uVSJrmEH>VsZ|w}WrLIk^cyQl};9hxOb;uf>jX^C2N=`#?QD&TnU>)a^** zlVTqFT38Xz!4hP&27m#Nu;MU{mtdu6=Hez+SR5hi7qOO$qiPaaBj_g3r%z2-%;q{w zBnR%FbKdd#=n4nJDGr@bve0xFd6Z0?^0N{xw@k7=B14sCoSXQ%(LeXi3<5jxl%e88 zT_n?Sfv=61HT{8a3kl>)1-N%{>qJYLx7x0MeM@ijU=H!4UfTm(VBOHy{IY@5>?d@% zqRa9&nA=+#6ABe6v*ZohwS%(NI3!{GizcG;K^Bd<3lPDb z!TmRX*kUkboSnVskFdq|Q%ciM;UuG30y==Lv5nyGtOkf}H8IOMiVUlNAD+2xzIpyV zAr%hXw9Jv7N%?B71Wi$*20&mlB?cP*G|rnCvS&;{WfP3U6+;%b#wA#kYHSXya?>u2 zz~(7As#a870>=Z&GFGtVbhu5KPm>ueg3@#&RSW~c@b~lNP1y#(V{CX5G|VyR-NdnP z1$ej>AFQ1U`@Os3gr{C2<66zj93;%#<2`of1SWX#PQ{6wsuzKw*yN?L710kq zYk`wW@bWGFQwTo&)*;Rg`f#hR++RS#^Fxa8=*Ol}E^gS(J-ieh(IoygFQ%=y{X~BW z60VsDDjxkE8HRps5J%v_825pNKRAxcQFW@?q@IUMb#@hae+SQncX?5!GbS@Nr*4#Z 
zZVdmYBZ!dO1m!JDTt)s8d*v}r4^4EKj#v@tT6{QEn7{&l^g)27pXAkNWkTv5{}$o` z9g!J;y{7hSEh#%KWIko%#9}N?{XWlEuu>}YDxREuTh(IxO3o^_ zD-_Ww0P#|l*`y)JY{WHGvGm$}1fD<~ta}Rn4rHma4`=et`URQlef){#E+cLLIBFGR zx1;*xh$M5NVTx8yi)5@H9&wOmn~787VZ$91(im^jRRnuN56DIET)lw#@rgXHLZ4Q% z+}DEEK8tmQPg>DvMGUcjcY)#fJ?%YrK2DuM#O0wPDVO~&og>Ci%*)Yzu(G`qN%-c) zGxH#p)F#UM*>Fp@nzB7ro#3yG-Jf%qp&%&xH&?5hIsiP|C>(kHL?Fu+e zAj?R1lSzXZhe$8Y@Is)IVPya}NmBtS*5}CS{k)%Cm_p?OY?xURrGI55Hp)d~VMJ$s z0Icgr)*LNz-_EUWWANFZO@xvXM6Z}2V3|94Ve7b(cSI4L_lC57wBr@%0q1m6v%B5s zVpe(t3(i#X3#MbD6u~rNz+qt~z=1~e`Nh}CTJ;f`RS;E=AE0@dd6}9SxG8Id^^^)3 z6G(;1Yx;7OfnaF*aB>|;cPmU;cwJp6rXsw~)$0onVm1V=W`x`+OJ@K%9%cgRUkDxS zwwMemJ81Q*(U?MzcK|@;I|TY2^->>${D9xL%=tRogRU`p7e;Od496oD--4N27!z7i z9wxp+uIo^y<=nC-0t%CMyyd+zK-B*AwKnWJd^_Xp9=IoXy~6jjP(;1r{lXUJQLN9# z$K%iYsMTwM^Jc(R%k=rgDc%$SpDQdRS<9L_=HE3@B_R9+ZIgN;GgBo*$yvq; z;+`r`*iY`nNW&;-!(Cb&p$-q1xXJ&9CJ$fZJeU3Z=N);FsW2-xOVe!=UgUu6KAtd_ zI1hd~y9s1#cd;}vjpy;B!9h79@iDCJ3!pk31j?UvKy{qnnJ$ja51jSbX3_WDDdv7@ z&iUQ-#{cYWw%+E%)_WzKI<2+@Opnt!CS)8TN{kqr@|#ARb3dzFc=ZxoHidpok7$gT z@oHGE`#MQO@Gz^m%mU8R;~0h27#>vJtp6|&{Bez8%cEw`y&b~%-XMps@5?v8D-I^>^$0##l-(aLMvA3SJnanHDzWS($YCN< z%zcRQ>FIO&rmu6xeTqdDj0Nzr4+GyKDxy@J8vqMZ?;urED@NQv5ZjFflUM)qFQ;$C zU~6#?XrGvfI8WfeIG-xCzi|=$9g&i7xy*KfJVSQC*D#sa@BMryTDtb~S$lcG7_Bgo zYxlr_9?J9cFkj1qL6Lw&R~hR8sArrv;ga0f6g4z7J^j@7&l9+ zgf;`y7C42qx^?Drpw0?QoMDO(Fff*Egx|ZPcTN^Lh<4m%tdazI{p!>)}(8-c0 zFc<;tq1Gee&Nk7t-4GAr32aW0npR)0Rypp@_)06w3I-Kl=k1fih>Xf z8by&1S#F(u{1E<166j{&jMA9m*j>uSGOkMf$~Tv@aEZ`qgUHuj$Ex#<7K#-Wn+rTR zjd0QO5jvEZSM6`>E%f?n4SA~&Ewhke_at?90?`4&Za=(W%>B!VE8AkI`CaBkh6?N0OyV$6h{ zX%K8|O5;oU`+hu_xn=VQv_^bOw(EZ!E~Kj;>DR-rBA~VsV&EJm3c`-{;{z$hNn}xa z+~W#jg&Frqo$e&opGng|G|i)1-*CdR?ecUY-33Wq`1uaoKUuj)|78{y_yCu<6fH(5 zF{N(E7O&#xxPn`rgLe4Z>Qd-u3 zn|>K>gB#`%aoA$sKjoiR82Yt>5fLdwAu8@sL~NpvMDn^EO#XlBaz7VGbMve!*Qt=* zLJ2B_;1SbNY6jHplHk95aQJh%qvJMC)P_p8W497R@OL6B5p_-#EynCfkqqSAZ^k>= z@M#DhyHIAXDXdycyb1I_M#bTW-@TkTKbe$h-Ndhq{>{Ts*Q%HM5?RJ7u6dozG&5RW 
z7e_Ca-^cn;88Eekz6P~sRt;BCE72xVP(qry>cGi_E|L99uv;DR$#znrP>d#px znjREf+5q3GC)qcABk^2?(@&bs<2NY&GZQ{;&HwHeN%gv4LNKt8PvnB zlVFO-35QuYJr}ocEP9r_;Fw^xG8?yNpOCC(tg zxRx{(8NGi}(5E3KwEyC~+Qg@0U59y`=2A~{Jpa&xUIC(T3|5ZvE7{Wvwo3;TB0a&P zsUcEzbBA*jzt2Bc8BV=Jk}kiA-YN!s>YR_s71Os_C}WEp{Yh{zN7%&Nyq*D|UI91W zEQ%x%5eZ9d^F1f&-Pp_3O1R}X_EG1}K#uHRT_mMCjLxxNc-9yJ2o3-?+-!eQ^6x21 zW6$ZNh{Dn(Ph{=co~&o|y2dsiO(L15rQXl15@P87iIdc_RuW})bBPI@vPt9A>dtH- z!~E)Y+BbXgZ4tN;h9s)$y(Px9?#yr4I9LJO+bH69Rg`tj4bKF5J`V!DE% z(Uk%Vvt>>54)>zi$sf65aIz2uBOv*fMdn0Q3tD2nk6X)LIaMW+Vr+f^zCUE1S=f@! zVMJf9TGt044ZV+!iuacJzh~STkkYo9C}@j1_*pY6^}f2U|5LUcS&v#eckj}uVWObY zEjGj@oqm-;!E`w@qX~jkz}NP;Ku2jH=ie6^A?Jl2T(^2v%Eu8z*7E97E!FZ4s94|d zwVkF2J(+-yNE7aD@b5S3>#}nuU8s6B>3}Gx+nff6QQb#kG5K=r(1@{fX5J(7lY=U5 zHpN(l?{-KWbR_B7Ts!cl$mGJURb$lavMOuZC7cP(u@ln#r=S;1m}M}{>UdhgJ|yqT zg3BH{@W!D;j3bMqYEFC1%g4YD&g8OLcax$&S5juK&LCqMCS<>0FiXsx{TTh%no|rq z&;5~fd9pPO(5xi~zmGSD&rIwR2XbZ2J8vz@3Io}=Gl50#5(=lIe~uEkFW(c;K|K!x zXL;K%ESmuRzJarJ!&IaJ)Q!k?&Rf9aTT2XSs7#T%1^ zvCwf|N^L3`!_fo=sgOFh~wf=SdEim*%nn_TfN%iiCCE2Z~5a44D5qIeS$~ zh?Dx_4{cZee#^Le-2UTcGHvMIdW0MiywlVkAcv0uN|kG6pkQotrjp zbD6e!5iKNp3P7=2o9Nptaoa(>43nN{%88Welpan^>3#5y}J_ zS@q!10;~d|rxz~v3uVchFpE&@yIagb^<>ZP(%W+DwW##=eq8oOR_In&UCKO+Pd#}> z+_Dx&27gZwK3rw^&Kg8f-}64UUZx(C{p;s>YV0woT;YOWZ24}5;TuFwWc53uA_x{A z_Ej8@ggHaEnymu8b$jC#zgO^Ki5@fN))>xaz|tiURT;V{Ou8tg&eC@u9rZKK63i4I z2$cBl%7~3@K;!1L{$3vhHqev~FK)Yul21=m>hq0$d(uR2^`h$S7JBT}f-YADb%@tr z0E)5XvKYADcXW)C>^8xlE4>-0T1R=?=VC&Xp7^nN8+M1-j^&m&(akGa@PD3@O16rN z7b-ZfqvK^%eY*GcDDf69WJQ;T0UgZB9b#e?aMz$V|2B!-ndbW^lNWfbk;}}Ecg5)p z9fAEY!{IHO$yy9>J#I@)TMmu?RUm)tIYIDn?&J4^2)fdtjVLQ7jvf#hA*>yCz68^# zliU=ek!287e^hNyQ1NKMmTt`t&=sG&HvlHx_cW!k`7w(v4X^&2=)L^+(_9sAWGz;e z-e!U14~^eaZg-*p#3~uk=`IyyS-vQ@JjI5(H1XPjD_@(>uP>N1P);b@)0A@cYf$%8 z!L}JOA=y&Q45f!0GFo^P8e0WyEHC-hftvnttcKHgVC#v+`#OPY!#y^44t4qp?V{VS zMVyQ(FpUBFO1J2}dY;4^zd`QCU*3vvH8S(+$S8%;2gymxfqzYkVvKtxPgi64xL2a; z=PMo|x8-c7?)K51?Eo4UFCjJW7lI9t4nOg%z{qK-wdu_yJZA;uDJP7+d~f>S55)WT 
z1Lav0nIoW#bLhczrD%(afv~qq1Su0Q$gPRdB;Bh|mJ6DI9*+aKu_g#E6@TseKzIkR zB8)tDP((2%g0^J&MjK#TnfCN}bJ%I7Dmy}Nw#-lm@F=ay@hzW+3GRy(_khq0-IZ>^ z&gP$VF-KZO$l<1~+@huh$lLd}=oUEi7I!~5khW#$w?YWt_I2rk`tAAs*TJ%xD$=no z^45tY!Sg8{e^9?=N?BfX`vt7Uiw=Q4fOq%xT{FAg_?ojh@R6Iz?werLsGWv!pT?a?ST0!{Ay>)&T3;HXpBaNwqT9LtBT*#su)x-OFEusl z0gv%4?RH%m1v-k1+kmP~`|ZJc2Or>mZ|?2^&%zt66#&Ng=h1`G0_9H}(Y}vf=5AIm zITbFI9>aFjiv$S9noRP{>)h3gf}-cua4YJO8ecK;kXs zCNa&}M0A*^ET?gnm6k*Rh~Lf`o`jTUG41P$1{vixQQ9$vW5Xtc>W`X#R_lax=~Xo( z{l1{{ZJ`IA`0ompfWu_8Y$;LqKP>?9+h5#^T+Oo}_xqv6mm(x-K~!49oZNSy9Gzql zSeGp2ZmLuoGfr4d7684Ndjl$W^q@}Q&CGptzh{R2J%@A7%k3tL(gVULpnwzpEj2vK zf{;GQ;yXym{R=vKBcZ5=b>C>-aXEvX#3Q{b-SQO1+c(sk81=)wte8Nz_pu#W7NHgQ z?AwQj?!J@!3Xl?de)FJBrMHIf-Xtdoh!2%Z#;-&R4CR0vv5)q3G*P(v**Q@dn^Pc^ z6Ln%cs*4Xsf8fQTz{u>U3P+4Z0xackZJye3b|*3=ggXvvdU;;amAG6}0!6ZMJa_{> z|G?R07wuTnWI>+Td;T0kA0{A6ICh`De(B$wCJ74)&h+lk)@REyV8@a0^jo_LW55q) zVlo$EPb+YYcxoxH*xn5w3xZ@z`z8SLoJPEbel}fwvh^$s9kzYDk)Q3+9~r#oc3sN1 z^ymS?%D*&%T^4xNIF^<9)w3Q8NDOAj)Y;$tK-LL0sWlJYbZR*nlk``YdZC{*yzF-p zFX0?#9JN=6F7v_gTdIIK!wslSZ$0*WoGQdI`Y8&&0P#?(ME;#ZPN)*%oVI(_gc7P{ zA(m-uC2g1ZKnQEQ45eXnwJ{K%ZZeNPyD-oe1hRnJnqZyw;X%46Ao_OXgD7b;aExje zMR8Zs^xk;`n^`5r&^0jgKTF>%U`|_-#-fC>uoeT5UXEizt0KCkhC4#cvqo}+?^hZG zT%XOh^XUwdmyUrTkRn`;CM~T_!&nn+&4vJ3|7J9}!gFSC{vAiUyiuEvCoPBy&GlN1 zZhmFdXtJBoaJB#isS3K|oUaBqP(OrZ^K>!~6xF2vPhyTfQ1{=(l@VaiANu3sAjHMz zc_`E#(p9kC&nTut&bS@twJfx*{LC{M{P*(|;fQ~4<4G4w%sCa`q9Um@E12NuO+_z@ zkN*UTSDc<2vL+6?HPuV{jz^KVESN#c`@gUe5y+2%^iaZ2`O65*GTw zwLW7X01kgLt&N~p6d?ktWq1WN$V2J+51_C(54ONvr zr;9Vlpg?n2BYdIk=63_2`KTnq$^!uKf26f;=whY8ws@l<#Z1Mcn*{jq_iz4Ou5E4Z zsS^eBBMt!*ewe4Gwa_6G*mf?f)w7&H>czC?HVBGsv$J5#G&d&USOnW@8-M^gUY6XS zNOI6e1$k12BuhD!nM-*5Fvqp$m;2~WgDDVp%l!rrn2QPe(GB+&A@xIr-*l)x0AMD( z;$?}8;uk1Yvc{w7hx=MTEd<@9Za+zX0Z*;!fy*Zfwd-*uh6d;dLmVb z1&)CtE11XU&|sy;TLtf+iPh^%FYc z<;di_E3kNBbYb=~q2K?Hc7k#{U>yUa4`)u_1{;~?>qDg||Qi`>i7kOm8$+Ube z75}ab_UKVtniRFy5g;)MWm+99wc)S~(LT8I>P}(U0x)3jHDj<0HZ;ts1PL$!_JZHd 
zyj?|OVNx*RN!TGEG==+o9n{yUHSd~hH~iVg48?(%8@ROs`R*~uBqX@JY3|Anw*^It%)j6J5%Q>H=^gU+$ZI|{(S)uTito(J_Ju?lk^Y2Bq_fcoeJ{w3 zy>+z<;*}odQGo8GZNH^WZ#Vldo~F_pc`W(sv4C%oszVt*^?6it`7?Hnj0VS001#av`QfG)&!5{%uJx7w&AcIC*Ml3Poh5_SHDC!8`Z4{rWNuV*xoTKA zB+5!TB2IK~Oy_MBf^V$^LK)Egk{-#c7f610=_nNiS*;I`Dj$uKXGK~5!biZ^wOX)RyB@4p(&p}Rd`QEYW8!%ay_18`>v8z5msdt==ZD|BjQpK;=js$7@c&2E zRR&b`JZ-w;lF}`uq;yM*fOJWx0@BiP>5vX-X$b)V6{Wioq(M3arAs>Ay@>vQ?+3nd z&pEq0JM+vl71faxz!fxA{bEA7ynYFTH=r!u(7lUEM|--_r!JaJ;U4R{2YHPZXn-Jc zDcZ?>`mDc>>i)`>NC@;9$!vVVV++t!&e~xK&LL_PIcOm_jafA}X!YVjJnPvTvTpbc zKEhB^o@cRF7XUY^5#dAcchVF{NQ0L0O2vdeZO=rxky@jn@7xui@p@dZUY@-ZI?W8s z!ijVBKg1jqIWNsH{9L}>ma#s2WR^qDO|i3LYkt2mll!iL1%1f~z(dc$4;UebgC+{0z(#q?`&+Hb{ABYlA7?2;At(`jvyplOHv-!m$X%&$V<1$ zw_yjRzT+OK^3Gp8kY(+fJJE~DBZn{KWYlISihA_Cd6CT7CY|pEPeybi3M6Jc7t=rU z(W5H!J=0WizedU`Cs6AWh2UTb=*Lo}p<07xE$8 zAX^Cqh8ApyXj=VDU8<=fD3Dg3UEu>17G}o{5Altr0!($`<>@&MZS6?!!c5{S`2y=I zmMN_DjDg22cI*SUETmK5hwsUrobE@594rz}8ltH%1&R*L4u4k7lKc&Z0iiz|FM#B* zo0p&G$(u6*Y|$%Vx9^X=A(umTmdg_JSpi*G$1?mW`nhC9PaVUXv{H)6LlupXiPKIpM5VkDCpyz zkY1$JNgkGhswfi*f=NG#Ainy1fX4nxAVLc^c2vPZhxwDJT!_RoJP9JXGmN>%VR96r zMTRQXvEFZvAu=8Xy-7cYiP`Sc`aBDz&;F3Gmf~#_)Q&4s0)?en`pjGOmxIRnntb@^ zaDoy;Y?y&qXolypw6S>*=uguDE9}V%$GrO8i-R-!wV!SWvS1?8sE6aYuGMpY{C!*X zj_-O75D5!Veet=<^Ec}(wb=pZV4W;RG2rKox)uNpq7|I?H~}w%YEt*sl1RcD+2ly_ zrLW1ffciYbvfR*Qdq71sKFIZK>vIF6%{E$J9gihCOd*N18-+@T=u1~d9KX&IzdTIX zDY3PtB>|qSBq)RpBQCi?@;NfiMC+1MPB24%LQM47z|1iCV{1=fT7btLi&vj^>7iyJ zf`n@BL%A}N0vBz#;-&3Sex&#%co5rZ&;Ys3>^8IC<(GDGlMIF+VJt`~;tpb4la*52 zP1n9`%(5lq_7Ny=a#1G3#RPFv+Vm6DlzbM?KOF_=WG z44_x_#HSOGtY5*y)P-8((+5LC{$a2JMrq^0i`@I|EAflXMQH%WdS2+CuwinhQxZA` z%{l43GDMnG#H;R+jZh-WN68s0H(~fNWbxo-3-TSE6Qe_%g5UQCAk7Po9(gdE0c&l+ zYN#*8C3iwh`;rLEbxn-MBlGJF<8|l&yvlcgZ?<_^)$363HWTGE*ub37QX$9tsCGdp z{p>bq?GOazUucUmIy=q&h+~xxzmuVgOTH0tn7ywAKZJO5C!0ZY(bMcd@JB)l@Zw9rvT_k!OH)o7S2(SX8469@oeN%s1 zj0l~x<=|@&e^cj|EXU*S3;_xAr!o4V=*wD1mv;;eTZ1v#V!75~h>ld)GSd5K%BomQ 
zsYo95lK#lb+m4bWd@%sy_JUC1o!;RO^ICzdvtOO$V+(sFPcw$&1V4KnZzxL6fF=FnB8Tid^7=# z_%1x*Aht?Aju_DsI~^yQeoxUtIrkL=^)iH>d90b z@T}MdQA0uwt^4=do@epG3=r_a5MZb`;9++WraOEO(+A%+lvDFf9a2yWw6km%I0F2- z=TlWl8NQwdziaiJE`E|~@RdOBfMJv4Gm%A3ll`@>-=7!uz;be7G%&0Txt!~J4t_PGz~6qGGwBj3}6DLpnXB2mDD~r z!rgmg^9|SpkVOH!wO%ZulH~+)-Q5cygw(&Uar@m*BMF0M(uMLH}Gx_A{WsbtCtTbI;VM`V_pB0 z1>E4wAxL)|T7}!W%Zz|-on_SO9OHYTgLtE z((w1WbFGmcm-sJ_K7K6PE7bR0p@pxt&~ZP<<%Vg2krFqw7UFWJq|!+X2wDYQee#;5e>Su@(MauuQ zKnHCNGXujdYFFcw+ta5p3myZ;jyEW6={d6s&=uzD#K>+Wo3e~zjp{4f6-)q!CSq&?gD5@gT)6jR7@_UT; zsL>eN4wNe3A9cf;h34X2C~~El8A;VS!o(jDCjM~`+aZjlffQOnnoS@_zH24%Axr3R zrQYxL;abgm*k5^vx#|%W1#r?eu*^oI2suswM;fDAfUgimS7>m4G+eEezUw+%4)e80 z=_DN4K{#E1ZV?Q}KzFXNA&Jpo?P$(V!D_$bL3JuB{?*ap0+&RfPJ@nFiAgbqCV^2& zXf+6Q2GD%z2WG&pYL)!ni$*>b{y+Dgl8NF0F6Tvn<)y<~O4nuoZRqb)Tq4#0Mtt zK3BumMc#GyQQK^U2|g3dlZcGuV<2Xg6DgZlPNho#P0q%mNzBXRu5krnKOe%G#j9{6_Kx_S zJd|2L`@K;}#M5xVrc7CA4GQ=k^YFHIfcq3|ZqpUo48mWm6c4Nh5`T>p+Hg^0w0{YK z*J}dB;lD@KlQu1IEU-8aqkf8t^=~}|5*y2fu-Sl)&Zrg~(JM_+dHsgp{!Iv~7rC=) zj%2WlAzgc#iyZS&_e4})zJ869;|7IEqioFy$Ps=1(?v}!zC1nl(`N9c)NtifSU0g= z*<9pF+(7aJ5t8p2cJ^;?9IMJ0VClI8LW1r(cmryJ?Crw5PHE5Gz~MarNcyEn-g0$* zc>D!zy;IxJLxw+}WWZL#!0bc#+Uq40FgnpJ#nUqRCTjfL;zd$Orfg*%y_rO)2cSru1wfAx@7V%{BsEDk!@L-sd*L`RpCyunOL8R6{eb2? zPYKO)W4tW5h+11N>fNb!5&pUOd3n6Mw=DdhPD9XrN$?SYt(THs_lCUOkzqr<7u zkG5k9tQ5aX(}uZt0}D$xt7Sdkd~x@cr)C&8!>Y%iuR#?YHspCOXF zn62*g zxpd#QA88g>k{xn!_gc$d5wfxJmS|7VY4=-H%1Us%1tNQ}eYT%x-*|L-*hNjo{?!)- zqNZW)?_6WO*}3Qg`PD!ApbfSuBLRi$3WL(>nC3SEW~d6F{0NqEVIgS@7>CPiMl3|KH45|<9>i%H6u0sPiK0sy*Dpmku&UfvJeN!U&Cpy_{NXc zAkor0if2ohx((JzZ<{6H{Le<_N!L1{=Y?OifyP^Tn{GF(asZ=-sn za{@F1`pD=d_@Jw$i-$x0BQ0q~2xu2qXXp01pk64xhH#?Q9o6i;B$^C;2ck+FkM)cW z=yP1GK;k?(nP3j|X`C$dqACbGZ=JqBttPT2>Uvqig#AFM&_N>SSvz{vK~NSm^&{+u zWce52;U#Cw*QIw>&|Nz$>0Z7+%sMy4)~|5L*_>=NaNV9MR`A=`u0oaF3dfEoo|5my zi}1ojh7J?7Rw_Zdlbo((&%L-WU0! 
z%FhqLa-^ZsC$MZ$H(!wUnI_iK6c-wiQC5G>YdxMgvN5h1*q8Qfa`7h9V+-^{i_4)y zv%zmtM*jjM*Orl@h@kBP^7*<{(VT|Kz2e3=a=S zcz&lC<+;MjK&Mi=`6MtNAU*8x>Vn>5c%53Bjt>pFQ9jr|7uNE#8aH~>`1{L0Isy*R zEwXTj#zy$>=@jZ}oY&UhLa}ilm1Y*kAyWrqE68c^+nU3?a`FtFK5_!v&FD36_m52{ z`;VtCj_3D*AJqraGE?+46Qsq^2vd)~5>+2Bzt3O&6XP zgW;p>zyy5bTnHy%dP_HukrSMUs)=eXH|jZNGa9=-R`Q%z*_V1J>SY+G@c3N~u2Oa7 zAvD!Gryso<5wel?`)Oc|?-NRkAhiQ~jfedOf9St3EhGoooHMD*M(gwshduC=A zqR3!@=KLUSEcpVsTl5iUy)g{*gZaaC$7Fepp3*1WBabtpK&-BlP=}VguAM z-1}9UzGinha-ZUCo<5G7nbJ`Qk4vh_&Xr2n3#f@a)BCg*0UG$$A8hmb8M(Qo!m zERPhZdvfYLe_KHOXG-zk6QWF zeNT3dBz{+&jdU4jEXvI+ur(U{5vXDu-9Y=LQ!kBP^HBzKwp2%$7=E|y;4#{7Sn=GO zInWPoy)!N<738%?lJqekzt^a#(syq`USIrl6EF;g3n7su>x}_GLN6&A2Kwj;5Jzl^ zxQ=s~#Q}&I(`nxeM9B9J#!xG_)y+)i>hGQ)de`fm7+ES@)C_P7qu0?zo(=v1(U~2JzTp@(zAg%ah{S<6yrZ_caobBpf_Uv0g2kXBr9niVpEpIBJ% zq+q5HR59j$lGiHT5+e#v+9Sqa=IbbyoDOp0yIHzr3ui)~tuSlVe}VaGuS33`S>MM0 z%z^#J;?5p&5Q-FyoE3Cll?ScX)q~%8oOfBm-=zxK0i6=ek+FwKMffZruOKxVMIG77 zkH^s7Xv|6t!E&F`3~xBO*awhi0jZwQfEY+E;QmVz=o&(X!c2p`4G3SeJT1@AcUcS; zTjnBGUA`*xInK2G{zcXKsp&^G0on(=xXx>^3?y@UWpWHG7-I!+q#;E9)ao=9d;=(MvY(n^u#`d!)19R!?vw45lgFkKPi=#*aq%9F zPz*n6Sh{+UsE?A0VP{2+_X z#%g?%(dC~aMI)%Ic64>mX%|W{SE;nJYtj&$I9tcyu|*wU?_7BCz^|NvQt`EYuXe(V zo={#6I%lR0~v-d@%CS+079cM(qF;E;J znT$c~fx9xC5zL(DsCB|`^{r-?e{~VySnUI1K9OatuR}7QhA)sYgA z$^0x`QN?1k?C*oTlHTGp%9H4#4fzUf3u`EM4w>1H#2pA?F4MOk3}|;yymuNMfIp+E6-;d z%|fxx7Z9gDK2OlfIU*L6$V6f%^nsGllp<;3n6ssG!fbb}GoHjlgsEvMCD&Da-V+|3T?64e;MqQyHIH$53jiT|h`?`OmodOgR0x;j88xSp?k>K4=e z_7h$3D+SZRbcP&#nMbxx{__3-p0Ci5itN5p^a}YOtR9PB#Tq<%WRqB*^EVfumiouB zfM-hjY0u(5N3Ft$9^C5BxK!3*!CDZb`Xjmf{qStbrb8WzH+K7RGD4oYvq1rve3Q zLdluEnoXgwpAYhL`%vqDy>D6x{WYT5Apb=mhy{riDW9I?K`&~Ei6@}B-l>Q5j;xgD zHKvM*n6KTV&qvJ9t6y;YUbyp8p$E5Bm?JK~u$5LB$rkh1agHFDmy49U zT9ss}%F9Pz+Cuhb<#VJc8J?<=@mo@o9TM=H6X7iq2OXl#oXQ>HB_E;jy2+R{hjJ3! 
z@|QtyD|CV;=h z${<8HSgf02)=pYwjY?gnw_c|22~+^b_4cJUpWS43l0we|+3PQ;Ve;3!ZL`8CY(7wp z70=SU%w*}nzYlr_A{m;L%j39S)<6{kqQ@0Uqa!SBFKae_e~|srl;7d?ban6IR{D?K z@v@R<(u>R+*RhFtHRL|WGHIFd1$T2K@3LHszKeH|oPyeu(%~<~c}x^2(2bJC@69)O zB?ZlZS;J($UmK~09;lE#y>i87CBbmCz?yfJnu9 zJE$Z?f(*n^QUa+ku@oZp_-ILLY?WnWsLD`Qb{E@XdY+*14CIAJ;8ua`6-#R#Q;~f? zKGsTNt(Lb+m@TEc^q^D3Sbz{%oN(3i={jxf-H(!#T(PiX{ktS}Zad}q?}Fi1v+s1< zjX}Cm5ZD!`f|~?+F+6Ht-#1`=8WVE&0sk^wPmv%~OcVa_JHi$F{wKbASi0#>*1s?N*<^_KA}jkzoK z)4EPXENMT^9h~2%CpQex9ozF4dWolx2WJrUmXThpxRsZDIzHCjF7j!CX$9B;?!qDx zINlVKVaAF}l$U4e_B^-~?coHP9Rcf|KSK5-13weIq+cYyr~}D>Y9;26G|P9rL<&H% z1O>_GxZcyBeEQ|ApE3Ct8&4=%GyS?pzW6qP9`m}+Joro9#}`r0wH;#|2oj?-a>`XI zbM?RH2D7@A-bA1sM9_*#JkMP-ods3i-q-smFSXJ=h5V~aza!&i{g^@n@^elGM`Erg zUhgdHVnxy9%72XVkm9|X;W!XJppRzq%}snri&z}a8d5USpW2_P_36<44dgr;79t>r zFXQz28JoX3ecqJS6S}hd7Td{v#8dF2X)1+YrG{GdRhe8in|4QU{7&v;Ad)hV>PrJx zSoImcz5l*;vn{Y-O^PhI$SG8fn=j{BsA!kaw&;-pOZrLbR>%+DclVFghri|G?&*<1 zlLbjiW8b7NyWr7Di!)+K)=36}UO96xEXm3sRd!bbss_7Y;Qlqq@j3=MZ17XbOEMnY zqQ!{Hx9k0L38wRW;9bV_3xq7}9f;SVn^HbXaimTzUqL&lwGuF-+8XTuxR?R6QOiQq zD|eF$(9;>S*cRPalG@RiF{ZvC@DWG5=|iTIVbEj-jY4@GV}zy7wKqrHFCj5gIQrtk z>NCY$g%*;`a*lc2N}ycE$d$e^m+Q&3&v5`@RP8l0oueuA!F7+=2It1 zQICkQO>l$8LfAjH1!LDPVX|M~j^U!z@+St3WY5j`EkJDr74A#Eb#f4ojk;LTzlz$( z9&UNY$N;$uF}E&OE64}^LQ2N{HGcU1krN08Qc6=wVM@#$4nsPCIAh0FJQWqL%Ofcw$fEAsR_W0Y5t75$V_Hb+fqt;kT0-*5 z*Ax8jNT$|+TTX)mq48r=kR5)!Im2G%7Q=8|t*7R?6$_@DCq7f0`=YFNb*ZgZeZj^u zjiAkQmY3k5hTSt!e!w+WybLE(hx|w3gXc&A2jU~DG@ip^J4qe0Ei(j4whK7rG5t`G z>ZFvAzg#!xZ91S+kPVNz=t5GUx3JJopm{0zg>uDg5C1NM(OkX4=ALMzUbpmU5=kF2 z24P~*4Eh=6RnzljS!`tK(}Th7S#J9|k-NAC?A&KxecXwpwi~%w+ZU+dwXslsC>JVc z9@Yj$N_h)8@u-dt)|W7HtUzBjrCMkug+pOOOrpZY<}-a?W$cN~LFn1Z_P4U<<;F zMj=AeV{@IDNI7jj|MC4@k6FD(r#>~^KUzJsp;e4X*jw0y@ z=B_yzU;ApxaLeVYSiO-h!{PcB5o{5HU{3GMt6*PvWQim9!vGX%X7x`0uVZO^jrE~ii!L;iK z)T;{evF+ZcF57%}&5C5l24K7$?Zbryc)`Q2b^7+D#P>gTqhG}DcPTvb4-#>gU^uPl zZuFCh$9>f6e<{cM1hx1FtG403;WtUAEPZwSr~2$Q5yY6osarfZo&k&mw1wFn+eme3 
zuW;3f+>nUk=w}S=pb>L#$%TstQA55ljY6BsOp5wrp}eW!%RPeSsbY3oigaFk-+U6q zK$J$#7L~)E0a%S+h{Q+Qg(m{CPp?bGZys+>3NM+h$|)qX+}Pk0+dB-2PH56-RrNLU z4RXM8?9)7Dcw{@G>!e*m=RuyyRD(f;vq!Xb`0vYHU~aNYl-%eM3f&M96HN?BumKIJ z!6cQYrXUR?tknyvyf0{3AlH`DHl*)QCV8zayv4(&CzXSUjSEuYdNX$YHfK>&^yQK` zcKv3(gq7Y5t;F&43NGBAdL3w+(So8lfO)?nCipHJ`Q+!mZG^g*uIamEuZ_j2I-xCm z15-}b79dJ*ZV|<}CNj&f+30)|O{8>T4RQTeCt5 zs`drrFS`$ITxis;o5a5Oc(Q6m>^y|WP=O*4X2T&f8cIY()7XlB)btd|b6nFZav|%E zM5vw0#*trdUSXg>JW&KF^>a2+eyv(r7I{^zG}li?7fM{Go^ta*gs1m}7@ zMHY(E34dni;RvjUZoe+EiJ(k96==MC(qu@z{PbN+#>I)2*rk<_aq)+{UF6k~Zcjw3 zbfd&VaoE@4mU@%!RoYH*A6yq$STIk->^l+ImX?&5GMZ@_SWw(YW|sK##R5Y9ZSY-< zhUVL~$W?Wi)*Ek&=znWa%8lCHKkL0Vv0T=uIqGk+8b%Wp99)H0f9**PqDtnRpEtMJb>lO$*4SMla)saqDyB}yhlDs&~d32u6o=qNcV0t)J%&_`v_YLmHcJ0 z#^tw9RKMRN1_m-N2=E9z3kV37QB_LcQC7Hox)5+ssG zza_Ab+8UOLyUSgXo`_=c3JE=zI9eZ3J#jM%WPT6T6bqa!W>QLHdVBMvU+m4$Wfvpb z=$IRk|0hd!@Vr7;$Bn{yjNEP)WF!<&3?GS-WwABthHv-~WrU;WHmZle?WG8bp3KyE zF#fqQ62xGWQi5M~(%#;J+o#E~?CPaPbBJM%jl4``a=$DL@3j`NDi3Th=2F|afQyxH zyn1zlb^8n)Oz@1hSAE5*e+B1eGvb=#QlqazymIt#C6Ngdelc6}()o5w8xOt-Z9jXg zQx|uK^5&#g8o+)lU!{P#@n8b+U=3c7MjiufK%yG9f|uy?(z3K4b`FW za)u=T5j4VViET`IbU_iMsWJclCyi0NNiQJ0*b?_qYL+cVzMkpej@yZ7afxN!?EJnA z6u!$__xV7=e-Gl6mF6x42~Buf@$>rO%WLxeq_T>MH@K>5(O?ZZo^{y>$0@biuR^B6 zjLs}4omc4{_mU(;-n?(k;QzCVg<~WVhZW@iDK*%ctdvWT&~0wz*R5Z3ggp~7+?gp52`A*Q8n$91IP>=`Z!MT}O&VBU@7c@qE`p4ZAa zDEN;IONgOSluQ+cpe)RKyz8+&%imI3>4b&7T*i#>uNF0Piz;$@gJ>$pUx_YVN zRh_=d?Y9o;D3EwWy6J%#>)nNFqN&yL<}+HwmUrwA3YI^*@%=p@PwC#?gQF zcUG@5f7r`vIWlR^Un+vkpCyBoT>=5q>)||_Wqnu=gu04Oh?5%%Zpz$i#dvNQ{w>( z!B!BF;}vS`4#8y#fx8hzr&s?pDwHqDcuMblx$yy1{|5YgxjTre|5Z90JzqpXf zzA`#Dj;PhL_Z&x}RH|Oa`)a#yd%oqDjZ6zfk-;@l31S|t98pe1sNe5jh#2;QLP0%@ZJdHl}_Wq*_ zRA`LAwdqezS{1q7^qUvN1|pPvCZGEdLqGEYb8f3CgzV>o4}>p&GoPKktXL;APD)06 z?KAU%8un0s=XKq%OwU!vtQd|Pr46oKEx125jjb8f|2;S-)#hW`P!t;B=>^d7?SsAGTN%{9 z1I}+kJLTZ!cHJ5v2^bh)n2 z(_>0Wh6qTq!_#z*Vr$ggtixz3az6-Sv)FHVpi!iX)BFGoeV7zjAwwi%)`>k%Ui%13 zkYwRJ)<+b&(aL%+l$i}NgT&@+WlOUq{I7k9Ekg)JjBYv=oG~)Mos52tX2wlEVT9wg 
zYTG{Xt6$bNcVL`%^~=o%(;UP0;F5P>i}Z}DzKfbKxc#z=3c6`4x0&%fEYnA9Q}6(Q zh4krzwar^t0_<}+I`i}1m6Jg+`0G?AV@<7x{Y8BIiDvh-5ciOzOK2u6znL8fRE}$NzOvZS!t!qs*a+4x z?TH=!kX70xbQ&tTiWd(8$~am}ib9G?gP2)E9@ygp>7$E9du@^SWyN8LCe8AGsNM+xhx@;~o59;J!YAG2v@UWQOG#S_RnJ-?b zoZMKv+*n>;Ps1`j_%g-{>8sB^ZoXlXd-A`NmyuxkV4*i*E$xYwDs1m>D~gsA7`}&O zYBMOcS?cU;QJrzCNN6}<2h(RUSosJQ9)j{VI`67wtl`qq{qq%F$yS0q?P<;vy3@v( z)yaH8%70ffHWQ)u0IEo~jrf^fq=LF-B95h(4S|C&W(ZXm!JSekDRR?vPL z)4@&6qmE6y+HE!rI(KUm^uaa-ru8M2g|@H;BX*J_t%3sH(`x*R%!CJqjrablR>E$6 zV4@(PJiAezJ3EINe^tF;g4g-(*%m?XVUj~*5IE2rIt@F)}?+_nVv#c8v- zNt<87fT;02yTD`ZR^961QIvQ#CClkgidHney?Elku_W#=TYZb0Ao%yX6c_l}IM2g- zM$#3}=@;I9CAL0~bf#e~QQYc0NqY%V#&U3%av#1EXn*TTrbvSqGT|i`iai3_jBSKf z6eUSnijYu}khsNblh!>(3~g)-erf{wT{(RCy*U(BgTTpQA}pad`9GGX|2i?TatL-P zzta71ralhN&}dZ$o)l=emZy_i@V!oyoh|zPKTD$v(3#mdJ0+@bDZ=)pTT8=n^-t89 zamH*DKKzM#a@WiI?SmrZ@(em7dpfG8-v5MTw-Bk*#wrv(+a8T6{_RlK`R-*bC7a3{ z%ts)dhrbBI zw`tE-xT;U2CSK&NKEjJv`-PC;0vKWzbrXx%t`BybJop*Epz#;Rr0BrUW(;~Q;T6p( zf5Dx%;-=u5L~S=^J{i_tEOkdL%{Aon%j%6 zpKeV!ajX)XkJ(T=MBcTnaxD{dxV5aR;JDlg<-HZ6(SLeAR7<0V zw!IF7RAZ~EnPQ|q{qZ3C?0iA=n1@CxESHW(6ME9Co-4Wb@4f&}_)5Mxv8-lIt9tV9hdK>=s-1 z%*%U_vkHYi+6V=R@VQ8s2CX!A2X$-=YSR0^x`3t-8Ni`VyAybGxqGcqG4SP>iLcvA z%H-Au#Zj$#Fv4;rn_Zp0`zQRxWHdL53IX{_f@ZRsG|%W<*cr@u+U%0XcU{&G>m|#d zJLU=-=032he=zts^q&Q%!0*mqeBq5@osD~SFbMZL_#`+PxI)ko0o>dQw2Bv37wwoK zx5k4BW;}W%ozvtvAoV8{vSP=0>Ve%9(TJ*=nBckw`(3}(Ng9J72VeM~{(N+*`1h!s z44Mxhn{zn4JaE@FR!RveE7S9ss`?l?8@zg2<(Lk>e=Q9KEesc;VqEw`ZS7@N=ce<7 z56QP5q(;}ip~rZICw`5u!~0TR|6rg91rPV{)0tzR0BvC6#&2%VACUZpx21hj&%pDo zg#8-?1ZtC~uX-#a*>v&7nk_BO#0+RNZZ%~y3}9jUBlt0A6DDp~O^yV|`CUE*X;9f8 zh*;5b{dllHsP4BJrKL&#@9ZVS(2H^&A_-U1rRlVm{>=rLsi91z28Ew0kpUbR^rf|) z0+`A0j;>h)g&Y3Pw+`q93lIR07b**%kXj)m4sTDg4`gN?(8e1`zdd<>Z#tux9i&?nH6nEfj4e9^w^1dMeDfLZAXuGut51|zwu)m4h;^L$* za#zaq_4l>DZBPkBM8nw(?9Im$-_Y0598={S4sX~lK1KR>PMn#@hu9OwJM7&6+3eIJ zTUSt*G$5PG@|v#cqrEGosKjLF`c%f&a4t1rv+_^9&cR5V<77DaXl;pWxNXrRnpQ-w zMNt=AP66nsRTGxgFO12T*ie=?1OJ1i=I^1K>(JxwhS>At*17}B4-I;WiEX+sF)+lA 
zV<5pYkPaEB@tyTe1B>X4Kl{N6=3xma*Ru?!Fd+bqvfXw}JJW;;NFo0@_DLHKsHQk( z_{$BGvC(u}xslAc|C|#F{9v$?K|(Pz21br-DBkuw=F}X@&K@ew68dD+!Ph~-%1;|m zcLYiP2s}D+%Py|J3K^gS@J2kIdB$TTmt;Y>TvW)Kh3F8NBInsQvpE z^K>XW4-Rf>mX*0%@)YX}{sCk)7WH&hRN6}eVH%7G$kUIBpU6JE@E0_^6+c37R016f z-gOiNyCO8Zd8X?Il=9by!m+}ir5F+Y=0g>1|x()#7Yo-0z}|m%Y_YN=vY|TVkA868=U$ZEieF=?-A1~ z`APzm`zgqstuyi`qm;3&u_IGQ*{MWb6hF%@_Ge&aHJG!wo2**%cyhGH6s6ocQp(E zKCzcEu5tSy1vHzBv^pxvN}Qc-EL{D33T1;cflnG^oJ{$$NyaeeUVzTGzO?y2ZP~1W z);+-29nJT}Z4P0lI^Foa0P<0lrh~F5W~KTd+$KqB?h^9{RdM&*MFQ^G3!EnmQHuS3;(MR zmjThXC$Mt`a$$uXp&XTQvXIJE8nqMjki{5u#7w|%FO(KiMMZWyB&x|Kk-hd_0q36E13Y4}3oJvV9hDq~RD*Vu+x#&^wWyr@0*<`5Y0RW1E@9^xFP zvpvz$6?{PD;rN$4cQ`~)quYGOJM}7+yj<#w4QlKoDIJ9Ik`a?6O`1kKQJ~e`1xAKu zRz8;I`QeKFk9)r02bynxKfC!6BzCJzwYl>BosuSUEzPpi7V6hN%*?5L!80=4$aN-K zA3uesMl+cdoV@~?ptQ56#Ct^qQ@^w;Hv5f=9Y1#s(D)G9jHq~WEtkp z@c?{rupErC7Tf<{Q0M*Kh1%WYtpJ!sd6z{y^BL2+h#G-7<6ykaIwh*TL%XHK1}e|! zZypi-cY9Tg)nMw0Pzz8)T}*&}TNHJ|UAgkv-9Eu?@7I;DUo&G|AW0X-{q{Rap)Z&1 zNx<}*=qIV~|57)x(D)(q!$IU__^tuhFZ}wbHOEOmtwIAN+XtEm$4jTevaV%6d2qf!wuN4MeaeGuE8Q-Ocku&%tcCLsVJ4 z%yqW^>vmf+@KwO2VTj;g&%!*+`K%EWWx9fT?eB1H$iQBY1UwP}%sWzqNz&r#KRt*3 zbHf>6k*n}8BD~1aniAN!sqFEKSF;X&djv%}w8ri<-xUnB(BD-9V0gg6xEu%HWj6NX z;4cTnuuvhIp7H+x2uy(ig9*<) zAAlgomi<}AgKG3AGt8^T-3&hO+sFw)_0NG1TNfhqyIuoy-zpC-?PN*%IBzkMG_^3#buh zjBA*;UiOHdgj=>2zO^|a3~PJg)gFB~PuNLg8%EB$X3}~2yJD^X!P7#I|LMU5z>hBX zjIMd9FFe6uron<2hmui-0BCjeJI#`-GWf9)qf~_-uErC7?xxz73y<|cKX_GLa1+D_d4>Va`j~+>I%KiHb`%(%QXw)K%GMwU2S(XTK4es(u>z!Rq~g zgn%B9qz|sY1a|B3clCoVeJpz!vq@0UT!) 
zI926W_(x8e*ECNux4vfVp0mquI>u%;n5ePiE3M*&0h-);gn+o)>!pFEwz_PR1{pLeL8GPS7Hcl5rb>r@4oGc79#_o4}S zqn7_-XCT7iFuuT?RsaiwA{n|vJNbEf)Jehbd_zUl0Gep|SX0ry%ns&f_W|ty{}AqH zB>HqTH)G0gcl{M_yg&P8+zMW9b$K~`Kog2ei+TpJ^&-`JF1TbffV||`91b7axU+HW zw#tTNx`$4U@kIWgHD;6qM;orPBFOAe1q*bNBYV$}x(?6YlbUT1%9P#g{{Z4?ZNt+g zFEFo;*i_ouD#%>aQxSBKg8@WmvRUyq`m4lo(7PL{QshQ_&F#*KIE4U%i1f~m7(dwn zTUGtlBuF+?k~;7)S`#5A++U@APxJR@Z_8k-cYQ9vi9r!6!HDhGrFB4iw|ZS8Y}kor z7vT2eeuCIbAK3N#YVHz9`YyNC<%{Ll!sUw9$G7)^i8cEJSsTP;;~`)%0YMRZo?tKc zGW8{5N~$x&?_kuH!FPUCIP zpj`Ka;{CMbd}FiDoz~ykR~{nj%(t{wzIMDO@`Xo0=2p9wgPe~Pn|r;dDhu-Pq;$65 zamVIycT&Nd`RQj)2sP~aOJ(|u2aT|7#wjL0tO1&YWFVm9@Go^?9FL^Ii1?!x5{@Lz zjqYLmzPMr2{yUmqA7hv~e8$~-G_3C+PaDZ{KlUd|`dd614s+N0d&JC~f-tKPeQ!{~ z|JPke2%77Qm|ETIpHG3$K}nVM4P>1>0^zsVFPmRSY-T91Z&g4j(#itO=q`=4BTMP9 z8o;RCN5n|)Z68*6C9^ok*Pl^B?KZVE`q@SWKOX%HFU-Sk@Moem?*nxS7NBp8%}M5* zpFBKrk-|BY!akB@P=o9-n64QFoz(tb^{A?Qz^Y}W`n+22X>Ix^$m#Pg-CNo|T3w=s zRJ~K0KM>v3E>wun!A0v0g4GXlm-o+|EiZkvkB-yM`YJp6pzai5Owk7hr`-Qm>#z?P zZ;?}*W;_*R1!N7q2?s1Zs+rK;X5WP8Ie=%*@FX@Zd=u`l#s^lzn_0S5^9rE)$+59b zbJ2o#?fiejE{?C+@>QL|_AmdI^3y0_jMC!8!9Q6vHj{H;fTJOCe&QD zuN3(=08h~XWjCvGyEAGm9zgjt2L-i1N~Ff@fr=WAs$K+OoalWQ??Nno07@+8YQ~%K z&nbb|cjLpXk$H}QB8Fm;#2IJ_xLSMdbFx*6{3`CEnaN~qoW6Q}v?!hTpPys+^d?eq z2SyUix(RD+v}iXKvKwJr^(eFth@e(J7(j73NRf_@|A*CR)?nvabN5FOvx6zZRUU5Z zv_n(z4-z|X1ZI|CpWf7p2nFM@@fPc6(o0MCa?jNM8Q2pa{|Ph!Js4H~k>V)hv|@E2 zbe0oS>+*iHD~&sSjP}1Mx*#*~qj1<+3#I)D@Brt6_7sO%$cyoRE8$-tX{cL8fm3V zP*NJ{lx~o25QdKb9)0xjcm3b@^ZVuT6W73XdCs1_*S+qw)_smR-h141Qh6MqK66jY zX!YWj`H#=`#(tf1M>JeJvHEV9VDW@Qa*Xj)ICgWd7P{&Kr;0%13iF_2H=r?&%4@_W zI({*Vb;RpI{p~r~(SZX;51}!T@^Qb?7iN0a-76gy4tM$#uM-X=Zg)rd&qz_bHB#_7 z1m+I=a4h&Y>pEoo;-et~$Ns~sgONA0~0`o#;y!S=jME${kfXrZ27 zUFGrtzcx32tq$Fo}R*P0z1Nk9p;#|?g=XCMOkb&5`7pxbxthm z`}a_;WeA7yCl;EX^VQ@0=3;h($h+kuPXVt9B3Hr1ZhG?Lfh=mX7Jf+{befbFCZaPp z<$)L3gA(=}5W&>h8V0{ORm|$LaE9U^`z3RqkZgA$!MkUFWGc(Ctj-xkDAG`2*y&GW z)TN>{Q&(&+$w5zC$?w?nmn4h4=T?OlN^lV@2crX+Uh{83qhx1$K|Q#_C3A;7zr?<| 
zu!>?h)Z(g=82a6-89}Bb;lJ=7h~iZ(&z1xP61Lqg(96-|@l^y!)3Y(06vb5ZeFohm z(5j#54}(|8{pKa!Xrv$uH!pMVhbUPaFKW^%Nf{mJtl&#QGyWS}7+8?*VQ#1)nYttE zQDRYjZf;na*7b)kSejRV>R50Noa_tz(6i+_$ud6aO&1*2Te9Evyv)i}MDQpR>ofXh z)?NJAV`Gy3L_cN)kqD+F2yzxLI8!aP5DwA=`wcbcBYh0bjay2rdab!H7`QTU11`_l zf!JGYu7>&&W45xt%t~+KHACBviz`e>M&daZ8v&izY^ScyTwYGd<3BNOyQg^H@-X5wWnH_p@y z3_5kWZ+oG>B)eaGd1<(6Z6;iieYLSrseaE;b3|q$|2Id^41v!|hvPF_Jq>j0wI&V^ zI~w0(yx%!{ODo^YN_O;Te+TXPYqU6j`?;Bg8eWMV6$jT+*qm$?FG6unXO8NRe-LoX_a1A#YOY(E(GNPw`!H z6M>)_nr)a*l|26^Tm+I1wP35gwe50Fg8+kqCg zm=2e5YK(8TFQ1R@Q8sCppryaR?e$LZ;m^7xcnpLvy$CJyrD|GwZ!2rron~q8p7BGE z)c5!=58W{|tkJ)(bA;Art#)~OaJ9Mo7>Qv1-CY~%ASJ_s*dV_QI3YHM_?P9Bz_lkq zhi&KB6Nx_I6tKwPx>X&mQW_(Ho*>|718qxQAnx}!8E1t$<43n5=TeNb1+DX0v$|8U zCIoBx z>5ZrJdU@R%RxcNVBmx#rC9pnRPVS|jXvILu=bh;+{*jBr_395(_Yb3B7$zty;|=X< zYNflgp8ls!jL(}Kz1JnLJuF`4lvzt+Kzk)W+m%eq7w~(LkV|c+fJ8A;$`l1)I!1aQ z*Hg;eIYdBe&fNf|hq<{bbmxPZ0kLi=VZ(4N;P*HHT7o-nK9uNO(pV==Uk>>RV@W`$ z?GX0k9zDlx1WFM*gv7v$`igl9rStWD`5s2U9cPj7khF{Z1y>so6C1iKHRZT#=a?7({u6^iI}( z?K~fGLoyzpKl8*EWpNOX2S^96#r8){rEPpaph_`c2nDQ9prNJs8088uP}>o)s8u+j zJ*kI}{5@(|DxPR|%?_`+S1JiA|GpL$b->W?EiSg-0MTJG!6?UczwoO0EaAZ^on}&& zNI`l#d?zc(3dFVge%)N-;$?Q3?@0{4>e>Y6TfQrPO#hVV`U-D+5+`Tmv751z_!6{* zdKNmy5mparMA>zAC*GAW3e1i}tuE;pBpyb+|9s~Zx9HmMEBK%2k-~d;@!}l}U4F*V zGf`hfS|0_Ps@>2J=;7PPRQYQ<%`ru{C#Gd`4Vh*VmVZh_K2{WcrXo5CAU=q<-QYGm z<3`)noi79Eq9S@+v%0PHhOazh2b(^DpxNF#A*X-;Qvv|K?*c;_8C793m?j29T^XM- zjok3gvKXX~7MjfDKW|xC5B17eoi7%f_sF_WxekwZ9O4D;A&(9hN94UGk24Y%qDj=8 zb&Ty90=Hv}QF+byH&7r^5}T42-@l(>C2My3$M|oR-W%;L6DTh4i+{W)ORay2-8RRV z-EY*EY_r#g+B(}Fgi=bb8xJg`Jx&k4*=&$^uq@Np%}HNDf8S%~Bly~A=kw1)#{~tQ zW7K?P2D4enr@n53TWlRJhhi1VR^ErRNh(uGkyB8+m{srj`+}Vs@!lC6Q${89zmXb= zx(W$Ml#}fw1stZ89cD`QNL1~}00M1FR@y@)1(8=RjOu6f#1ZjCaSdhNROjO}4Y~P} zXSwA~xrLHetM;Xf3BRPag-Gux?8HN91Oj$GLCdI{NFs4g7hHw)cGpH-E#V}rq~NPV z8U^TLp5gd!2^VyNm-^BTK(qgE*Ft(NH4u_kO0QV2YF>8rO)+3DHbW1XTy}**-RF#n zhk(_2iVBKgaDf)N$LOG>+kM)Gye1nZmj96Iiqkbk;GKu-P*HEY9CtAns4 
zqY&99WKRZlS44s2MdZt#`1)cz7KIvZ2(je%;W$-WC%ieaFd#cy#%k_CX;}ELl`(nk zJuNIIiZV14vft~4J2Zn1!Yhkr-UnuTH4Wm+Qd+#Uud9~yifPQ`1Tr1u)^=&as_;Hv z)c&0%GwcAk{Eo+>76=bqb7=9m8w&}6*~)mnS%g-V9NXg?_064%e33Wu>}VMfa%RtQ zZ8#5Njo4xK0mF$O>#tK8Cyth>UvzPDt1Im4*obA>y$DZ)yb5&FdmAkc<6YdUsCAK8 z?`=}~nqw%q7{!zq;5E{}s}U>$$*kwUE)U*_M4)A6X5b+OmqR6xJOlmV-rQjIye3Or zT*ZOMlHZfF?`(?08A4G~f_>&Ch8rcgX5wb&0Y90e5OXRo4FuBVkpHcOmA8LSq5jzl zF9TYYh#|W+CU*wz$I_kvb#($=^D;`Z#@yUjB{P5TkarHC=TSbnVuu3G3=W<@Eztl z*+7b934#`=N!>2lCV@Z1_S`%M?wSjXJ0esmGOVSY91V{K7<|`O18bHcFwV?cyn`9`3w=fD{1%T#G`+l;Af`+}YFsBv#U! zvoCg>Z3T1>ClhVp6<*i560p!1c6iJz_0Pb9B$N~P{IrXRSBm$Z3ic3tn*QjGdneWD9*Kv ztT?*)&`pz!GLgmbPm>G|*B=iC1Z4$PeYkEr($1L?gLqm=7OjzO#8W|1n<*0>s|O4h zWE+_B3d;}kswj-y*uWr|D~GeL&SgwL6; zd+w6=j|?*<^|!Hb2|Z`GWBPq93m-s*dQrLY?lvZ?A<#Rjy04U)fYL}Yyq1$R$6-&P8+LFnde zU3R&etv?MN*I4A-bW$Yny6pW{%oc4w&J04nJuVsGmZ`asZ_o+Z&1##0~$O1D*)`77Pi_M_Xd`X6_n*d8poE38rI6Jio`r$D!H7-6+N9HvT zQSIigk$1BrZaC;B&bhkLf(gGxsH#;@m`7SkB}xe};2Xw1#S`cge4-6LN2<*eZRFU> zMlg+4CDIait`l{C$l}zl4l=|(IbN0mJ-hKdXUu;zYRj_ zCY9cFbpAB-@CsX^MvrPmUW}yfC*4*uKXS{ZMmPruh#lsskHHSyeT)gsO;- zi+Mi2Qd*0P-lpc2*g+s@yWr=2tdoTvs(6};bxN{Qe6j|o4#VZeFe}gV&LGR6Uui*biLg54YHcHyH^4EPC+qv% zv5)>SE@z4r*q+l~5R(DVBOrJ$ns&wCF=A8^=(J*1r~%Cqa9Wq{#7F$xbqbcK16r2Y zcOyUqRNNnuIBQ!$w6f)|RY8}YN%1nI5dQs@C82VYyR|}uCXLb3$uGe?FUm=5vS9xDl@RkFqg46~VYV#nAfy0nd%Vyb#A=hcc% z1JiQlOO*(F7iM1sYD5$^Q6tyGVP*f8_X45 z496-wr#{SF_xVTy!}pQ7@_3QfaHw0RKdTl)RiC7WA0JG{NJHp1*J$=cf8K1S8vI7w zc<4!Iyp1RM5%$4m01b`IiQFY><#XMA$6NSmi7mleXdgR^e1-%mKSpQO3oUkO28moa zK0C9Vk5clt1z3)x14ypJrH3S19Bd8GkOz(t7=)mafQ*gfANoxAyjE@NqB5MYGwbHl z&3(9iN+qL8Vqy?6mbrB6a z8FKxxn&`iVb}F0@=_yEuwC8|p9FT-VJ22j;BwLmQHim2L%}tlg!$k?*rqTACw>+E^ zdJ3nf%2Ak^;i`->MmSftk6;@umBq9)iUhY+)t;cC{@P?9Z7WSklz=aLnC1pjKO~|T zC^AolUR~~GEoBBxYY@!%ogQJeO)Ck80WGHk7-BL4G7eCk&Q~_U#W-F5xA6WN6S2Ps z0P>NJVh@rLc_Q$gzyP<^5Oh!Art17u{z02nhr24GFK-=Y@g&7Vc zt~P7b8(vE4a?UjYn=o|a1~PFJ3m-L&(5=aUZ<^O2IPpXgLnAczdztuwUcszuAzs% zkuc6$DvX`Jyjr9y+?MUN_?OxSnrsq~qL0N!3U_ZAQxkdUKvV^cOg2<2V7(?3UMno_ 
z=#Co?+XcKhJLTn3<+@RKHVb(#=ar} z^fnzW-`K#3WiS|*(oJzpy3=e-oP!qUJs&GU@Ln#Xh*AkZJNuOduQ2yO*G)v4GF(lq zk)voqWA&`+m7wmwCn60=a9OV-8IQ^=6~;cls^&6Si75gRb_1nw?SPz03Xn;9Y@S4FdQE*b|LQ3fRznt$ZL~R4(L>|$eWyf- z=|<7V+=&~Q4;zuO8X|W(4O702$~}^;sj%1Ep6F;DQBHjVY~LyM%xtu@zM6U)%jxCD zx5pu3F|LF1_|LIbLQYg%cf&loKptX3oRy8MD`tdKP!<<&nJ6EV5&k$he5ZpS$OsKy zOE?(4qPVJR_+&BlAolnU;6ZXJ%0br{T#_;W2X((T;=s~l<>a++a6S9zmuc2WUbK=r z%KH+5Sc)+YqwXk1^1NVlvvE1upU+(U0mIq*8|{XidW2*B;rrX3)=RdUYH+gXdLd(W znOx$S3)jZYZ$i(l|D`i3vnI$SYKf-zHgc}R@DY6E-at^!adu)O-77MkvYd=eUWTS~ zw^?f!l!hkf#Ga8%>afC6ijW3z4}y&kc}BlRZ&7lH51bO8<3O!K!32$3$ryT(Anro2 z_i3Nl4t*6Z2(9M6@EX+Cxe3EgL+iz$BenIC$UhkiF4Cqnl~{j6kU19;>^bz=usfD! z4>Ra5Z~^^-3^>0yMho|T>H{UnnblSQjVIDO-}kWfC!e4}IusKP-~aFs%%`lRSxX`BIK}&3f?E3H9as<3slSjQ3<3Qwq%^H6-b|G#|3jYe^nxfP1aEwOPq3M zPjh5v)|g@E7GrDN6S`vSCLr?_9{^iMmz+)3yU1(b@3Go7qTQDS`A)0{4S2L9NbPt{ zHG2-Vxu+Io7mw3NT@R9%i#6A99tk;Q9IZreD6}rlwNUiCac&or&$URtQDP4l2;f4(L^$Vh3kLU`706uxCLjBMoPUu&WcWioVbIXiUc7P8I8{j12H2% z=hki&W@!~^^h~G3F&k}lzlBx_OPc$FdzA66ky{`>UrRaU8v4~}WH8=(gee-$f&Ct+ zjP);ZR_Y(Sb`9#@?oRXfR@XP85e=I&UBaE={)ofpTiAL`k`OM$frZD8S;NZ$N&`T4ewTB*N8r-wDQOLm(*}ko zcq>*pAOzSJ;|G%61Qrh1V8Z^JT=k%lyt2_1u;nKg|KEPHO0oaj6`El2&U( z9G8f3kpBWsWw;T@z%*KeZhzoIK9J!>V}{WPAa45Y5ZDDc`NzkcnA7B z1dAjgc2UTyzDAaUgj_KEsYa$X=*aM)C;JOKp@F zlU8%0U%+D?Qi^Y7yP&04P~{N0qaB=mEzYx4z>2of$mq>WRHKX6i+9wZ-ekW>4ExC|h&Ihe(-ojuSRdVrJ4C&!u>^oSov9S@ZYqmDidiQ%YDuLC>$E zLd@seUq?X}ZzVTH7x+K8k(mcTc)-zAkOR5=r0lQ{Px&*@{UyPefl6q%u3?$dy0nw? 
z;|2K?fF_tyre&6i)=6m$L!P%dB-eN^QOw6WSZAq};}V`*mjK!*Rxt!fQ(CKuX0Y7;HGAF<>eVk|hsM>IaQJSz~)lGk37O`*ge) z_V<^tBMiQTA6JliEtsO1m%Ne`pG^e4ODFM&5?HFKEwX7?CyWEkJKL4elFIf$ufcB9 zR~I9Wb{Z-4m?Ts9lUeiuhkar!chdt;=HAs(d>rU`RZ{H0o6vDnYmqnI`zjIcf-js3 zzG`KpY<_qGvNqUdwNHFEtK`1`GiQpc(}Xk#a)2E8ji>d(9~yXy7Is2>C!n~)40aHH zn26unoUrGu=9BaFNkVHJ`{)n5)ZLvrh8T{v0Jh-g$W8RCI=%p7BgXo-c(30BTWOXR z!xts3$Hqa=|6C02ia%ZB46}NQCL7pAvDR33DGbIIGv0n^jeM~}Ur~OJn_EXjwc_Wo zG!~px%b$CyrGo7SGAfP){RhhDeSDh^*R7y})I?I@0vSe2!>YHgr0}eQb$X%qrd|e; z5x!Z~lEWdF%1bmfiXF9$nrvI>;nf+E`SDGuA&JZ}zFdrRut5(Q$_F$1<(!#!cO%mYUuIu7jzN^f0L*yOh8&CM_pXE7)+E-(RG> z$?F-jFJ-dOV|hmukpKDmp_t-UV5!{=T=Ao4dxu7rA!9ZLErIElwfT*=4X(6Hy}f2d zPJYJ};*iN*N*VF&!;!-uc!K+^-a@ibeP}px@~i$f2^VVXE_-qi52<4PZqE#CZ;v4{ z|L&0unSX!0(UK<2Y3MUD;QBDACs~A{q=;yGAR>PB70&H7VW(@tvs6KP4{g^5UYh(+ z!FmtvzOZYFe|3>EQJU4R{(7+qTiWAUd||&C?Q^QQp8h4Bk-9_l=j}{d~eVCSHkyVqolfY(ahWJ$u7nWsNSgv7=F{PIq@Aqm4(_3j#NyG zccU1`7{GtW2a!Egz1MZ*g9d&n&MqFP(A&pNhAH@r)zu0Q@k^%$_fi9cz%WQEPe!JG z64KWSiA8OHk5l3=Qf9a?b9tQ$WDks4NFS`=b3$c8uQPUZymlQ4g5A7nG}6oC#Q`~0 zRJn_(>fLsegCF(&Ku)Y=;)sEW_fw& z`DOj!(XnM_D{+(;z>tD$`*>|z6Yo(pB=a()7gHTfzQ|k!e|G0yJWklr7X0LR+Q)B09!RXt2ZsLY8bL0a*V*Ug0I&)bH$_Xk)lt*Gh^_?| zh#b(bgnYW4<`*n zyt}B*pt{|kjfL-OsxcuB4kgJb7$-~7K}l?=FsI`~5vp^VR#LlTI;p zs68d>s>@G${WiAwMn19*hP{53F3TlcpIv^xm59%iTeskU$QjHFsnpDTUhYEH0EHSj zi}l+04G=56jm;;B?X}E5pupmCLQ)F-e3S_8PS3I>v$P{n;#@NOfCAR{nbB0uQaOh; zQtOfLp!o&G!5i{^r(u~Z7A{;JC3GoCHVUN5GQA@{FeueGA310w-FB$h^lL4F&i+;x z*{kb!l+O7cQ|QYQ)o=uP7Ij^&*Qu$R<>@PzDhipEadalzv=tJ%fM^ciZH%!IC}v5D zmvEInn0*41S9em5bxY5MM2jIMS$L}{?hyOK@Fx?cq<$V9)GPzT{kq1zl3o0q?6wfJcg;jwq>AyMU zd>wevjR$6>rXud9cwB;pJUu}Ez^yxh3g#(=2%DVJgM@<5WiwWGtxel>;gh`2?M|Vd zjxj}nrBC8b**o!NRZ1DPO5ZhW&^0{JuV~h=ci4eZ^KZt$4ppHR=x827UHSg_A;aL! 
zTj9O~uA7LN-GA#PaxKD<3U5Z{chaBB1ynCk@x$3!(0Rao!vhGt!Q_*Yk;rzG95gZy z#Dtwj5)A=+QsC(dl2ekGY{L4!VZ5alsDb^%-!;d~q_IV@Wce4_+`$*jQ=eT#+g@B9KN{X+_KO{( zYNIQ9`(hN)=XAJ>-@n_X3f&m$9nybbo+Ych8C_VKZII{pC6h#DFlUl#g1_6c=&!8A+UO+&Pwt?SVRs)^%Qor&2I%(8%Qo)p0{BndU3)ZJqT?6yv?hN za3rOw(KE2E8(t=LRSYoojZ__Z`cuRDhf$CnU2aZIL?n%9W9>L^i@8NWTZ5Vpjv@yO z-i{WqK|be5(FFK2g^Ni>-OoNJnkCHHgkJexcBO_j*!%ALyL5$cDc`$f#4rrsPcQwT z0+8j|*L zH$n=7d)G%OrHV><9NP;#VAs!~92iV2?sQ}7oG&}~#NQgFtVtAbqSnxO3|@H1?KxK& z_fC{#;H1~t_@HK_sMgPRe~hzvCoLu9B3zH#dDc)&to&%%i>4IMwt1o1lH~2veXP7J zre#@#!{m;S@}2GBO>VDCX~#C5i-Wqa8y(Scdcu^YAEH+Z3`WA#n;iRTds+==F1Cih zck5O$S|f~IcOOIfI-hpsZI?ZwrR&7$5_T%Ah3v>>xlV0AlB@rrOs%vI8`mrQMv(i} zr7sEo=#(`}mGe>)0S38axu5As6ggaqYbxb)#~K6csM_ToCLLLaq7l ztR78S1T|4jAHw&`H-jrZAdic;nYqDZ{-`9JvrpX@R=2J`7Q)fBkM0aO*a$oBy|lm= z-$|o7-|rn}d~Sc>P;>r8lts_0dve9PcaH01!g;WvqHb|`YtZssWKb+wFFA9orR4`Y z0|quRSZfqiGz{!7ny7yBZshw2SnGGUPq%ze=6J6ll*eblJky&MwOgLryTWrew*-qbB31}g;U&@ z@th8R^E2YqrFhH6_6KTR9mb>U0y0ISp1SlkNd^o}5i^wM))U6X#^y3F2HhM7*^IR+ zv){kV%%cp;*S8wbv*Fd8VblujIH@c0RIJHgfQlDZJHD{~bSok^cDs}?c>7FM=}^z@ zjqSlw^p@qYl7aqdC4P9u@k;F6r!#8PD7J0+TN)v@E$_cvfA;jo_p?f2cnqF;$IFJf z>st}iEsBV#AXAwBS!t{B+4uf9aj&apl$?*mJhbY99M_GH_Uaya1Rb84ba`w{YYCk> zgkH|C3iB%wihVu_`yz&Q(9w5PDfhT|9i97RL3+w~caeMTyB5|o&t#zo$Bb=A*4@1` z&1*QwRsOl$KF;ihcJ3o4wLe$>8*W!7ca1|%xDMh+oIItm>zq`Ldc~LPC65GoN4;Q6 z;trb4fdu_^rj`o5$>AUBPS>~6UzeJ%xzTlIOmHVTfQ>xzbtU;Uz1)Lw$!m4)q=3_7 zuV}6MvSmhyt~6x1+W9iN)jnvCWN(k#edl5S%nLXC?b_?EiRH};4lf{c?I&7xm3Jz| zTXeBL3Jc0Q=X95C_D_^C%O0MqLHjyfG&!WSZ^R}?4Y-WB52o2ngbP3FnYlC+Q`8dg zD46NIJUiqTL)17IKDqts*x<;0-+8O{WRI=)%q_GtMb*o1=wUdbb*k==lH&1EJA|3v zYU4}KIV|RKk%74*tS?J>7_}r>9ICvwJlzuZachqAj_rCG)kaO8na1*xFkgx#;UF)Ao(e+-eA1?Pj-5#9LNh&T|Vr7tWlK`0R@NQN(dPkyggGvjB11 zMC?wLx$U7+pS``kH5q$dA=Qw)>9n{}!ohZd;lJD7vkL=1T*55i#0mM;%ueA%N#?)v z4P9J}tEtq2lz@k|Aynt=g^M0BJ)Az*lr20qIC}*B9+P|)a>9D>L&5`0QYk-@zzQG! 
zx-&o|cj@O=6z;|rR7*ZDt^Bnj)NSTs_fZaE36)0$D$=2U#KdaQMJUgVf{SXgCCv&@xRb(Jqnp=Rc>)!Q|iat)sQHse~#dV~xvtgf^It%l3i zQ{8hxDKEz=1=ien);GT|UR|-{IGJ!7#(UK9wC%@MhrMq}`C?2QJEt>cca+b^TH}vy zb&Er!q1#P$nYE_BPdq9bI}R0{u7$h?@{^-WO4f+&7|DEn!*5e%qHN{7vAxSP!DAiv zP}WWnj81P2j+^l@a$h=`+c@jU-fHTvaMVyoRJVTV4C`L>jh?E`s4EtcB|1}ZUjMPH zX>7l7taU4kh|}Pe_Avq>wefU+$#!=3=(-n~yt3%H7=^e1rQgJX%w?I_RGTJ;_E;@( zhP`%ACOh}2qZ?F5Gn{@au%&-3Vtm!mPt+?}l9u z^>W)7Jr%{4uh`PIl%<245+Z=74@=8mm_QHQY%spuSwKTW*kFKEs|3#C>V*T3FURpNU zwJ)UcZ7vOKe5&i7K5YK>K*7__3=z2e1FhqS{#T9CQ^QAMVOrCQtBo&@se{~lC zwt`-S-M_%`r*Mv6;Swwxiv9F|_|*eED!PXw%yfU9n1BAtrwqW3HR2lk|KV4IXjrX- z2yD4Om;C34|8DEgs$&!X_ig - .table {width: 25%;} - - -```{r setup, include=FALSE} -knitr::opts_chunk$set(echo = TRUE) -``` - -# Introduction - -Here we consider a causal inference problem with a binary treatment and a binary outcome where there -is unobserved confounding, but an exogenous instrument is available (also binary). This problem will require a number of extensions to the basic BART model, all of which can be implemented straightforwardly as Gibbs samplers using `stochtree`. We'll go through all of the model fitting steps in quite a lot of detail here. - -# Background - -To be concrete, suppose we wish to measure the effect of receiving a flu vaccine on the probability of getting the flu. Individuals who opt to get a flu shot differ in many ways from those that don't, and these lifestyle differences presumably also affect their respective chances of getting the flu. Consequently, comparing the percentage of individuals who get the flu in the vaccinated and unvaccinated groups does not give a clear picture of the vaccine efficacy. - -However, a so-called encouragement design can be implemented, where some individuals are selected at random to be given some extra incentive to get a flu shot (free clinics at the workplace or a personalized reminder, for example). 
Studying the impact of this randomized encouragement allows us to tease apart the impact of the vaccine from the confounding factors, at least to some extent. This exact problem has been considered several times in the literature, starting with @mcdonald1992effects with follow-on analysis by @hirano2000assessing, @richardson2011transparent, and @imbens2015causal. - -Our analysis here follows the Bayesian nonparametric approach described in the supplement to @hahn2016bayesian. - -## Notation - -Let $V$ denote the treatment variable (as in "vaccine"). Let $Y$ denote the response variable (getting the flu), $Z$ denote the instrument (encouragement or reminder to get a flu shot), and $X$ denote an additional observable covariate (for instance, patient age). - -Further, let $S$ denote the so-called *principal strata*, which is an exhaustive characterization of how individuals' might be affected by the encouragement regarding the flu shot. Some people will get a flu shot no matter what: these are the *always takers* (a). Some people will not get the flu shot no matter what: these are the *never takers* (n). For both always-takers and never-takers, the randomization of the encouragement is irrelevant and our data set contains no always takers who skipped the vaccine and no never takers who got the vaccine and so the treatment effect of the vaccine in these groups is fundamentally non-identifiable. - -By contrast, we also have *compliers* (c): folks who would not have gotten the shot but for the fact that they were encouraged to do so. These are the people about whom our randomized encouragement provides some information, because they are precisely the ones that have been randomized to treatment. - -Lastly, we could have *defiers* (d): contrarians who who were planning on getting the shot, but -- upon being reminded -- decided not to! For our analysis we will do the usual thing of assuming that there are no defiers. 
And because we are going to simulate our data, we can make sure that this assumption is true. - -## The causal diagram - -The causal diagram for this model can be expressed as follows. Here we are considering one confounder and moderator variable ($X$), which is the patient's age. In our data generating process (which we know because this is a simulation demonstration) higher age will make it more likely that a person is an always taker or complier and less likely that they are a never taker, which in turn has an effect on flu risk. We stipulate here that always takers are at lower risk and never takers at higher risk. Simultaneously, age has an increasing and then decreasing direct effect on flu risk; very young and very old are at higher risk, while young and middle age adults are at lower risk. In this DGP the flu efficacy has a multiplicative effect, reducing flu risk as a fixed proportion of baseline risk -- accordingly, the treatment effect (as a difference) is nonlinear in Age (for each principal stratum). - -```{r pressure, echo=FALSE, fig.cap="The causal directed acyclic graph (CDAG) for the instrumental variables flu example.", fig.align="center", out.width = '50%'} -knitr::include_graphics("IV_CDAG.png") -``` - -The biggest question about this graph concerns the dashed red arrow from the putative instrument $Z$ to the outcome (flu). We say "putative" because if that dashed red arrow is there, then technically $Z$ is not a valid instrument. The assumption/assertion that there is no dashed red arrow is called the "exclusion restriction". In this vignette, we will explore what sorts of inferences are possible if we remain agnostic about the presence or absence of that dashed red arrow. - -## Potential outcomes - -There are two relevant potential outcomes in an instrumental variables analysis, corresponding to the causal effect of the instrument on the treatment and the causal effect of the treatment on the outcome. 
In this example, that is the effect of the reminder/encouragement on vaccine status and the effect of the vaccine itself on the flu. The notation is $V(Z)$ and $Y(V(Z),Z)$ respectively, so that we have six distinct random variables: $V(0)$, $V(1)$, $Y(0,0)$, $Y(1,0)$, $Y(0,1)$ and $Y(1,1)$. The problem -- sometimes called the *fundamental problem of causal inference* -- is that some of these random variables can never be seen simultaneously, they are observationally mutually exclusive. For this reason, it may be helpful to think about causal inference as a missing data problem, as depicted in the following table. - -```{r missing_data, echo=FALSE} -d <- data.frame(i = c(1:4, "$\\vdots$"), z = c(1,0,0,1, "$\\vdots$"),v0=c("?",1,0,"?", "$\\vdots$"),v1=c(1,"?","?",0, "$\\vdots$"), y00 = c("?","?",1,"?", "$\\vdots$"), y10 = c("?",1,"?","?", "$\\vdots$"), y01 = c("?","?","?",0, "$\\vdots$"), y11 = c(0,"?","?","?", "$\\vdots$")) -library(kableExtra) -colnames(d) <- c("$i$","$Z_i$", "$V_i(0)$","$V_i(1)$","$Y_i(0,0)$","$Y_i(1,0)$","$Y_i(0,1)$","$Y_i(1,1)$") -knitr::kable(d, escape = FALSE, align = 'c') %>% kable_styling("striped", position = "center") -``` - -Likewise, with this notation we can formally define the principal strata: - -```{r principle_strata, echo=FALSE} -d <- data.frame(v0=c(0,1,0,1),v1=c(0,1,1,0), S = c("Never Taker (n)", "Always Taker (a)", "Complier (c)", "Defier (d)")) -colnames(d) <- c("$V_i(0)$","$V_i(1)$","$S_i$") -knitr::kable(d, escape = FALSE, align='c') %>% kable_styling("striped", position = "center") -``` - -## Estimands and Identification - -Let $\pi_s(x)$ denote the conditional (on $x$) probability that an individual belongs to principal stratum $s$: -\begin{equation} -\pi_s(x)=\operatorname{Pr}(S=s \mid X=x), -\end{equation} -and let $\gamma_s^{v z}(x)$ denote the potential outcome probability for given values $v$ and $z$: -\begin{equation} -\gamma_s^{v z}(x)=\operatorname{Pr}(Y(v, z)=1 \mid S=s, X=x). 
-\end{equation} - -Various estimands of interest may be expressed in terms of the functions $\gamma_c^{vz}(x)$. In particular, the complier conditional average treatment effect $$\gamma_c^{1,z}(x) - \gamma_c^{0,z}(x)$$ is the ultimate goal (for either $z=0$ or $z=1$). Under an exclusion restriction, we would have $\gamma_s^{vz}(x) = \gamma_s^{v}(x)$ and the reminder status $z$ itself would not matter. In that case, we can estimate $$\gamma_c^{1,z}(x) - \gamma_c^{0,z}$$ and $$\gamma_c^{1,1}(x) - \gamma_c^{0,0}(x).$$ This latter quantity is called the complier intent-to-treat effect, or $ITT_c$, and it can be partially identify even if the exclusion restriction is violated, as follows. - -The left-hand side of the following system of equations are all estimable quantities that can be learned from observable data, while the right hand side expressions involve the unknown functions of interest, $\gamma_s^{vz}(x)$: - -\begin{equation} -\begin{aligned} -p_{1 \mid 00}(x) = \operatorname{Pr}(Y=1 \mid V=0, Z=0, X=x)=\frac{\pi_c(x)}{\pi_c(x)+\pi_n(x)} \gamma_c^{00}(x)+\frac{\pi_n(x)}{\pi_c(x)+\pi_n(x)} \gamma_n^{00}(x) \\ -p_{1 \mid 11}(x) =\operatorname{Pr}(Y=1 \mid V=1, Z=1, X=x)=\frac{\pi_c(x)}{\pi_c(x)+\pi_a(x)} \gamma_c^{11}(x)+\frac{\pi_a(x)}{\pi_c(x)+\pi_a(x)} \gamma_a^{11}(x) \\ -p_{1 \mid 01}(x) =\operatorname{Pr}(Y=1 \mid V=0, Z=1, X=x)=\frac{\pi_d(x)}{\pi_d(x)+\pi_n(x)} \gamma_d^{01}(x)+\frac{\pi_n(x)}{\pi_d(x)+\pi_n(x)} \gamma_n^{01}(x) \\ -p_{1 \mid 10}(x) =\operatorname{Pr}(Y=1 \mid V=1, Z=0, X=x)=\frac{\pi_d(x)}{\pi_d(x)+\pi_a(x)} \gamma_d^{10}(x)+\frac{\pi_a(x)}{\pi_d(x)+\pi_a(x)} \gamma_a^{10}(x) -\end{aligned} -\end{equation} - -Furthermore, we have -\begin{equation} -\begin{aligned} -\operatorname{Pr}(V=1 \mid Z=0, X=x)&=\pi_a(x)+\pi_d(x)\\ -\operatorname{Pr}(V=1 \mid Z=1, X=x)&=\pi_a(x)+\pi_c(x) -\end{aligned} -\end{equation} - -Under the monotonicy assumption, $\pi_d(x) = 0$ and these expressions simplify somewhat. 
-\begin{equation} -\begin{aligned} -p_{1 \mid 00}(x)&=\frac{\pi_c(x)}{\pi_c(x)+\pi_n(x)} \gamma_c^{00}(x)+\frac{\pi_n(x)}{\pi_c(x)+\pi_n(x)} \gamma_n^{00}(x) \\ -p_{1 \mid 11}(x)&=\frac{\pi_c(x)}{\pi_c(x)+\pi_a(x)} \gamma_c^{11}(x)+\frac{\pi_a(x)}{\pi_c(x)+\pi_a(x)} \gamma_a^{11}(x) \\ -p_{1 \mid 01}(x)&=\gamma_n^{01}(x) \\ -p_{1 \mid 10}(x)&=\gamma_a^{10}(x) -\end{aligned} -\end{equation} -and -\begin{equation} -\begin{aligned} -\operatorname{Pr}(V=1 \mid Z=0, X=x)&=\pi_a(x)\\ -\operatorname{Pr}(V=1 \mid Z=1, X=x)&=\pi_a(x)+\pi_c(x) -\end{aligned} -\end{equation} - -The exclusion restriction would dictate that $\gamma_s^{01}(x) = \gamma_s^{00}(x)$ and $\gamma_s^{11}(x) = \gamma_s^{10}(x)$ for all $s$. This has two implications. One, $\gamma_n^{01}(x) = \gamma_n^{00}(x)$ and $\gamma_a^{10}(x) = \gamma_a^{11}(x)$,and because the left-hand terms are identified, this permits $\gamma_c^{11}(x)$ and $\gamma_c^{00}(x)$ to be solved for by substitution. Two, with these two quantities solved for, we also have the two other quantities (the different settings of $z$), since $\gamma_c^{11}(x) = \gamma_c^{10}(x)$ and $\gamma_c^{00}(x) = \gamma_c^{01}(x)$. Consequently, both of our estimands from above can be estimated: - -$$\gamma_c^{11}(x) - \gamma_c^{01}(x)$$ -and - -$$\gamma_c^{10}(x) - \gamma_c^{00}(x)$$ -because they are both (supposing the exclusion restriction holds) the same as - -$$\gamma_c^{11}(x) - \gamma_c^{00}(x).$$ -If the exclusion restriction does *not* hold, then the three above treatment effects are all (potentially) distinct and not much can be said about the former two. The latter one, the $ITT_c$, however, can be partially identified, by recognizing that the first two equations (in our four equation system) provide non-trivial bounds based on the fact that while $\gamma_c^{11}(x)$ and $\gamma_c^{00}(x)$ are no longer identified, as probabilities both must lie between 0 and 1. 
Thus, - -\begin{equation} -\begin{aligned} - \max\left( - 0, \frac{\pi_c(x)+\pi_n(x)}{\pi_c(x)}p_{1\mid 00}(x) - \frac{\pi_n(x)}{\pi_c(x)} - \right) -&\leq\gamma^{00}_c(x)\leq - \min\left( - 1, \frac{\pi_c(x)+\pi_n(x)}{\pi_c(x)}p_{1\mid 00}(x) - \right)\\\\ -% -\max\left( - 0, \frac{\pi_a(x)+\pi_c(x)}{\pi_c(x)}p_{1\mid 11}(x) - \frac{\pi_a(x)}{\pi_c(x)} -\right) -&\leq\gamma^{11}_c(x)\leq -\min\left( - 1, \frac{\pi_a(x)+\pi_c(x)}{\pi_c(x)}p_{1\mid 11}(x) -\right) -\end{aligned} -\end{equation} - -The point of all this is that the data (plus a no-defiers assumption) lets us estimate all the necessary inputs to these upper and lower bounds on $\gamma^{11}_c(x)$ and $\gamma^{00}_c(x)$ which in turn define our estimand. What remains is to estimate those inputs, as functions of $x$, and to do so while enforcing the monotonicty restriction $$\operatorname{Pr}(V=1 \mid Z=0, X=x)=\pi_a(x) \leq -\operatorname{Pr}(V=1 \mid Z=1, X=x)=\pi_a(x)+\pi_c(x).$$ - -We can do all of this with calls to stochtree from R (or Python). But first, let's generate some test data. - -### Simulate the data - -Start with some initial setup / housekeeping - -```{r preliminaries} -library(stochtree) - -# size of the training sample -n <- 20000 - -# To set the seed for reproducibility/illustration purposes, replace "NULL" with a positive integer -random_seed <- NULL -``` - -First, we generate the instrument exogenously - -```{r instrument} -z <- rbinom(n, 1, 0.5) -``` - -Next, we generate the covariate. (For this example, let's think of it as patient age, although we are generating it from a uniform distribution between 0 and 3, so you have to imagine that it has been pre-standardized to this scale. It keeps the DGPs cleaner for illustration purposes.) - -```{r covariate} -p_X <- 1 -X <- matrix(runif(n*p_X, 0, 3), ncol = p_X) -x <- X[,1] # for ease of reference later -``` - -Next, we generate the principal strata $S$ based on the observed value of $X$. 
We generate it according to a logistic regression with two coefficients per strata, an intercept and a slope. Here, these coefficients are set so that the probability of being a never taker decreases with age. - -```{r principal strata} -alpha_a <- 0 -beta_a <- 1 - -alpha_n <- 1 -beta_n <- -1 - -alpha_c <- 1 -beta_c <- 1 - -# define function (a logistic model) to generate Pr(S = s | X = x) -pi_s <- function(xval){ - - w_a <- exp(alpha_a + beta_a*xval) - w_n <- exp(alpha_n + beta_n*xval) - w_c <- exp(alpha_c + beta_c*xval) - - w <- cbind(w_a, w_n, w_c) - colnames(w) <- c("w_a","w_n","w_c") - w <- w/rowSums(w) - - return(w) - -} -s <- sapply(1:n, function(j) sample(c("a","n","c"), 1, prob = pi_s(X[j,1]))) -``` - -Next, we generate the treatment variable, here denoted $V$ (for "vaccine"), as a *deterministic* function of $S$ and $Z$; this is what gives the principal strata their meaning. - -```{r vaccine} -v <- 1*(s=="a") + 0*(s=="n") + z*(s=="c") + (1-z)*(s == "d") -``` - -Finally, the outcome structural model is specified, based on which the outcome is sampled. By varying this function in particular ways, we can alter the identification conditions. - -```{r ymodel} -gamfun <- function(xval,vval, zval,sval){ - - # if this function depends on zval, then exclusion restriction is violated - # if this function does not depend on sval, then IV analysis wasn't necessary - # if this function does not depend on x, then there are no HTEs - - baseline <- pnorm(2 -1*xval - 2.5*(xval-1.5)^2 - 0.5*zval + 1*(sval=="n") - 1*(sval=="a") ) - prob <- baseline - 0.5*vval*baseline # 0.5*vval*baseline - - return(prob) -} - -# Generate the observed outcome -y <- rbinom(n, 1, gamfun(X[,1],v,z,s)) -``` - -Lastly, we perform some organization for our supervised learning algorithms later on. 
- -```{r organizedata} -# Concatenate X, v and z for our supervised learning algorithms -Xall <- cbind(X,v,z) - -# update the size of "X" to be the size of Xall -p_X <- p_X + 2 - -# For the monotone probit model it is necessary to sort the observations so that the Z=1 cases are all together -# at the start of the outcome vector. -index <- sort(z,decreasing = TRUE, index.return = TRUE) - -X <- matrix(X[index$ix,],ncol= 1) -Xall <- Xall[index$ix,] -z <- z[index$ix] -v <- v[index$ix] -s <- s[index$ix] -y <- y[index$ix] -x <- x[index$ix] -``` - -Now let's see if we can recover these functions from the observed data. - - -### Fit the outcome model - -We have to fit three models here, the treatment models: $\operatorname{Pr}(V = 1 | Z = 1, X=x)$ and $\operatorname{Pr}(V = 1 | Z = 0,X = x)$, subject to the monotonicity constraint $\operatorname{Pr}(V = 1 | Z = 1, X=x) \geq \operatorname{Pr}(V = 1 | Z = 0,X = x)$, and an outcome model $\operatorname{Pr}(Y = 1 | Z = 1, V = 1, X = x)$. All of this will be done with stochtree. - -The outcome model is fit with a single (S-learner) BART model. This part of the model could be fit as a T-Learner or as a BCF model. Here we us an S-Learner for simplicity. Both models are probit models, and use the well-known @albert1993bayesian data augmentation Gibbs sampler. This section covers the more straightforward outcome model. The next section describes how the monotonicity constraint is handled with a data augmentation Gibbs sampler. - -These models could (and probably should) be wrapped as functions. Here they are implemented as scripts, with the full loops shown. The output -- at the end of the loops -- are stochtree forest objects from which we can extract posterior samples and generate predictions. In particular, the $ITT_c$ will be constructed using posterior counterfactual predictions derived from these forest objects. 
- -We begin by setting a bunch of hyperparameters and instantiating the forest objects to be operated upon in the main sampling loop. We also initialize the latent variables. - -```{r outcomefit1} -# Fit the BART model for Pr(Y = 1 | Z = 1, V = 1, X = x) - -# Set number of iterations -num_warmstart <- 10 -num_mcmc <- 1000 -num_samples <- num_warmstart + num_mcmc - -# Set a bunch of hyperparameters. These are ballpark default values. -alpha <- 0.95 -beta <- 2 -min_samples_leaf <- 1 -max_depth <- 20 -num_trees <- 50 -cutpoint_grid_size = 100 -global_variance_init = 1. -tau_init = 0.5 -leaf_prior_scale = matrix(c(tau_init), ncol = 1) -a_leaf <- 2. -b_leaf <- 0.5 -leaf_regression <- F -feature_types <- as.integer(c(rep(0, p_X-2),1,1)) # 0 = numeric -var_weights <- rep(1,p_X)/p_X -outcome_model_type <- 0 - -# C++ dataset -forest_dataset <- createForestDataset(Xall) - -# Random number generator (std::mt19937) -if (is.null(random_seed)) { - rng <- createCppRNG(-1) -} else { - rng <- createCppRNG(random_seed) -} - -# Sampling data structures -forest_model_config <- createForestModelConfig( - feature_types = feature_types, num_trees = num_trees, num_features = p_X, - num_observations = n, variable_weights = var_weights, leaf_dimension = 1, - alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, - max_depth = max_depth, leaf_model_type = outcome_model_type, - leaf_model_scale = leaf_prior_scale, cutpoint_grid_size = cutpoint_grid_size -) -global_model_config <- createGlobalModelConfig(global_error_variance = 1) -forest_model <- createForestModel(forest_dataset, forest_model_config, global_model_config) - -# Container of forest samples -forest_samples <- createForestSamples(num_trees, 1, T, F) - -# "Active" forest state -active_forest <- createForest(num_trees, 1, T, F) - -# Initialize the latent outcome zed -n1 <- sum(y) -zed <- 0.25*(2*as.numeric(y) - 1) - -# C++ outcome variable -outcome <- createOutcome(zed) - -# Initialize the active forest and subtract each 
root tree's predictions from outcome -active_forest$prepare_for_sampler(forest_dataset, outcome, forest_model, outcome_model_type, 0.0) -active_forest$adjust_residual(forest_dataset, outcome, forest_model, FALSE, FALSE) -``` - -Now we enter the main loop, which involves only two steps: sample the forest, given the latent utilities, then sample the latent utilities given the estimated conditional means defined by the forest and its parameters. - -```{r outcomefit2} -# Initialize the Markov chain with num_warmstart grow-from-root iterations -gfr_flag <- T -for (i in 1:num_samples) { - - # The first num_warmstart iterations use the grow-from-root algorithm of He and Hahn - if (i > num_warmstart){ - gfr_flag <- F - } - - # Sample forest - forest_model$sample_one_iteration( - forest_dataset, outcome, forest_samples, active_forest, - rng, forest_model_config, global_model_config, - keep_forest = T, gfr = gfr_flag - ) - - # Get the current means - eta <- forest_samples$predict_raw_single_forest(forest_dataset, i-1) - - # Sample latent normals, truncated according to the observed outcome y - U1 <- runif(n1,pnorm(0,eta[y==1],1),1) - zed[y==1] <- qnorm(U1,eta[y==1],1) - U0 <- runif(n - n1,0, pnorm(0,eta[y==0],1)) - zed[y==0] <- qnorm(U0,eta[y==0],1) - - # Propagate the newly sampled latent outcome to the BART model - outcome$update_data(zed) - forest_model$propagate_residual_update(outcome) -} -``` - -### Fit the monotone probit model(s) - -The monotonicty constraint relies on a data augmentation as described in @papakostas2023forecasts. The implementation of this sampler is inherently cumbersome, as one of the "data" vectors is constructed from some observed data and some latent data and there are two forest objects, one of which applies to all of the observations and one of which applies to only those observations with $Z = 0$. We go into more details about this sampler in a dedicated vignette. 
Here we include the code, but without producing the equations derived in @papakostas2023forecasts. What is most important is simply that - -\begin{equation} -\begin{aligned} -\operatorname{Pr}(V=1 \mid Z=0, X=x)&=\pi_a(x) = \Phi_f(x)\Phi_h(x),\\ -\operatorname{Pr}(V=1 \mid Z=1, X=x)&=\pi_a(x)+\pi_c(x) = \Phi_f(x), -\end{aligned} -\end{equation} -where $\Phi_{\mu}(x)$ denotes the normal cumulative distribution function with mean $\mu(x)$ and variance 1. - -We first create a secondary data matrix for the $Z=0$ group only. We also set all of the hyperparameters and initialize the latent variables. - -```{r treatmentfit1} -# Fit the monotone probit model to the treatment such that Pr(V = 1 | Z = 1, X=x) >= Pr(V = 1 | Z = 0,X = x) - -X_h <- as.matrix(X[z==0,]) -n0 <- sum(z==0) -n1 <- sum(z==1) - -num_trees_f <- 50 -num_trees_h <- 20 -feature_types <- as.integer(rep(0, 1)) # 0 = numeric -var_weights <- rep(1,1) -cutpoint_grid_size = 100 -global_variance_init = 1. -tau_init = 1/num_trees_h -leaf_prior_scale = matrix(c(tau_init), ncol = 1) -nu <- 4 -lambda <- 0.5 -a_leaf <- 2. 
-b_leaf <- 0.5 -leaf_regression <- F # fit a constant leaf mean BART model - -# Instantiate the C++ dataset objects -forest_dataset_f <- createForestDataset(X) -forest_dataset_h <- createForestDataset(X_h) - -# Tell it we're fitting a normal BART model -outcome_model_type <- 0 - -# Set up model configuration objects -forest_model_config_f <- createForestModelConfig( - feature_types = feature_types, num_trees = num_trees_f, num_features = ncol(X), - num_observations = nrow(X), variable_weights = var_weights, leaf_dimension = 1, - alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, - max_depth = max_depth, leaf_model_type = outcome_model_type, - leaf_model_scale = leaf_prior_scale, cutpoint_grid_size = cutpoint_grid_size -) -forest_model_config_h <- createForestModelConfig( - feature_types = feature_types, num_trees = num_trees_h, num_features = ncol(X_h), - num_observations = nrow(X_h), variable_weights = var_weights, leaf_dimension = 1, - alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, - max_depth = max_depth, leaf_model_type = outcome_model_type, - leaf_model_scale = leaf_prior_scale, cutpoint_grid_size = cutpoint_grid_size -) -global_model_config <- createGlobalModelConfig(global_error_variance = 1) - -# Instantiate the sampling data structures -forest_model_f <- createForestModel(forest_dataset_f, forest_model_config_f, global_model_config) -forest_model_h <- createForestModel(forest_dataset_h, forest_model_config_h, global_model_config) - -# Instantiate containers of forest samples -forest_samples_f <- createForestSamples(num_trees_f, 1, T) -forest_samples_h <- createForestSamples(num_trees_h, 1, T) - -# Instantiate "active" forests -active_forest_f <- createForest(num_trees_f, 1, T) -active_forest_h <- createForest(num_trees_h, 1, T) - -# Set algorithm specifications -# these are set in the earlier script for the outcome model; number of draws needs to be commensurable - -#num_warmstart <- 10 -#num_mcmc <- 2000 -#num_samples <- 
num_warmstart + num_mcmc - -# Initialize the Markov chain - -# Initialize (R0, R1), the latent binary variables that enforce the monotonicty - -v1 <- v[z==1] -v0 <- v[z==0] - -R1 = rep(NA,n0) -R0 = rep(NA,n0) - -R1[v0==1] <- 1 -R0[v0==1] <- 1 - -R1[v0 == 0] <- 0 -R0[v0 == 0] <- sample(c(0,1),sum(v0==0),replace=TRUE) - -# The first n1 observations of vaug are actually observed -# The next n0 of them are the latent variable R1 -vaug <- c(v1, R1) - -# Initialize the Albert and Chib latent Gaussian variables -z_f <- (2*as.numeric(vaug) - 1) -z_h <- (2*as.numeric(R0)-1) -z_f <- z_f/sd(z_f) -z_h <- z_h/sd(z_h) - -# Pass these variables to the BART models as outcome variables -outcome_f <- createOutcome(z_f) -outcome_h <- createOutcome(z_h) - -# Initialize active forests to constant (0) predictions -active_forest_f$prepare_for_sampler(forest_dataset_f, outcome_f, forest_model_f, outcome_model_type, 0.0) -active_forest_h$prepare_for_sampler(forest_dataset_h, outcome_h, forest_model_h, outcome_model_type, 0.0) -active_forest_f$adjust_residual(forest_dataset_f, outcome_f, forest_model_f, FALSE, FALSE) -active_forest_h$adjust_residual(forest_dataset_h, outcome_h, forest_model_h, FALSE, FALSE) -``` - -Now we run the main sampling loop, which consists of three key steps: sample the BART forests, given the latent probit utilities, sampling the latent binary outcome pairs (this is the step that is necessary for enforcing monotonicity), given the forest predictions and the latent utilities, and finally sample the latent utilities. 
- -```{r treatmentfit2} -# PART IV: run the Markov chain - -# Initialize the Markov chain with num_warmstart grow-from-root iterations -gfr_flag <- T -for (i in 1:num_samples) { - - # Switch over to random walk Metropolis-Hastings tree updates after num_warmstart - if (i > num_warmstart) { - gfr_flag <- F - } - - # Step 1: Sample the BART forests - - # Sample forest for the function f based on (y_f, R1) - forest_model_f$sample_one_iteration( - forest_dataset_f, outcome_f, forest_samples_f, active_forest_f, - rng, forest_model_config_f, global_model_config, - keep_forest = T, gfr = gfr_flag - ) - - # Sample forest for the function h based on outcome R0 - forest_model_h$sample_one_iteration( - forest_dataset_h, outcome_h, forest_samples_h, active_forest_h, - rng, forest_model_config_h, global_model_config, - keep_forest = T, gfr = gfr_flag - ) - - # Extract the means for use in sampling the latent variables - eta_f <- forest_samples_f$predict_raw_single_forest(forest_dataset_f, i-1) - eta_h <- forest_samples_h$predict_raw_single_forest(forest_dataset_h, i-1) - - - # Step 2: sample the latent binary pair (R0, R1) given eta_h, eta_f, and y_g - - # Three cases: (0,0), (0,1), (1,0) - w1 <- (1 - pnorm(eta_h[v0==0]))*(1-pnorm(eta_f[n1 + which(v0==0)])) - w2 <- (1 - pnorm(eta_h[v0==0]))*pnorm(eta_f[n1 + which(v0==0)]) - w3 <- pnorm(eta_h[v0==0])*(1 - pnorm(eta_f[n1 + which(v0==0)])) - - s <- w1 + w2 + w3 - w1 <- w1/s - w2 <- w2/s - w3 <- w3/s - - u <- runif(sum(v0==0)) - temp <- 1*(u < w1) + 2*(u > w1 & u < w1 + w2) + 3*(u > w1 + w2) - - R1[v0==0] <- 1*(temp==2) - R0[v0==0] <- 1*(temp==3) - - # Redefine y with the updated R1 component - vaug <- c(v1, R1) - - # Step 3: sample the latent normals, given (R0, R1) and y_f - - # First z0 - U1 <- runif(sum(R0),pnorm(0, eta_h[R0==1],1),1) - z_h[R0==1] <- qnorm(U1, eta_h[R0==1],1) - - U0 <- runif(n0 - sum(R0),0, pnorm(0, eta_h[R0==0],1)) - z_h[R0==0] <- qnorm(U0, eta_h[R0==0],1) - - # Then z1 - U1 <- runif(sum(vaug),pnorm(0, 
eta_f[vaug==1],1),1) - z_f[vaug==1] <- qnorm(U1, eta_f[vaug==1],1) - - U0 <- runif(n - sum(vaug),0, pnorm(0, eta_f[vaug==0],1)) - z_f[vaug==0] <- qnorm(U0, eta_f[vaug==0],1) - - # Propagate the updated outcomes through the BART models - outcome_h$update_data(z_h) - forest_model_h$propagate_residual_update(outcome_h) - - outcome_f$update_data(z_f) - forest_model_f$propagate_residual_update(outcome_f) - - # No more steps, just repeat a bunch of times -} -``` - -### Extracting the estimates and plotting the results. - -Now for the most interesting part, which is taking the stochtree BART model fits and producing the causal estimates of interest. - -First we set up our grid for plotting the functions in $X$. This is possible in this example because the moderator, age, is one dimensional; in may applied problems this will not be the case and visualization will be substantially trickier. - -```{r plot1} -# Extract the credible intervals for the conditional treatment effects as a function of x. -# We use a grid of values for plotting, with grid points that are typically fewer than the number of observations. - -ngrid <- 200 -xgrid <- seq(0.1,2.5,length.out = ngrid) -X_11 <- cbind(xgrid,rep(1,ngrid),rep(1,ngrid)) - -X_00 <- cbind(xgrid,rep(0,ngrid),rep(0,ngrid)) -X_01 <- cbind(xgrid,rep(0,ngrid),rep(1,ngrid)) -X_10 <- cbind(xgrid,rep(1,ngrid),rep(0,ngrid)) -``` - -Next, we compute the truth function evaluations on this plotting grid, using the functions defined above when we generated our data. 
- -```{r plot2} -# Compute the true conditional outcome probabilities for plotting -pi_strat <- pi_s(xgrid) -w_a <- pi_strat[,1] -w_n <- pi_strat[,2] -w_c <- pi_strat[,3] - -w <- (w_c/(w_a + w_c)) - -p11_true <- w*gamfun(xgrid,1,1,"c") + (1-w)*gamfun(xgrid,1,1,"a") - -w <- (w_c/(w_n + w_c)) - -p00_true <- w*gamfun(xgrid,0,0,"c") + (1-w)*gamfun(xgrid,0,0,"n") - -# Compute the true ITT_c for plotting and comparison -itt_c_true <- gamfun(xgrid,1,1,"c") - gamfun(xgrid,0,0,"c") - -# Compute the true LATE for plotting and comparison -LATE_true0 <- gamfun(xgrid,1,0,"c") - gamfun(xgrid,0,0,"c") -LATE_true1 <- gamfun(xgrid,1,1,"c") - gamfun(xgrid,0,1,"c") -``` - -Next we populate the data structures for stochtree to operate on, call the predict functions to extract the predictions, convert them to probability scale using the built in `pnorm` function. - -```{r plot3} -# Datasets for counterfactual predictions -forest_dataset_grid <- createForestDataset(cbind(xgrid)) -forest_dataset_11 <- createForestDataset(X_11) -forest_dataset_00 <- createForestDataset(X_00) -forest_dataset_10 <- createForestDataset(X_10) -forest_dataset_01 <- createForestDataset(X_01) - -# Forest predictions -preds_00 <- forest_samples$predict(forest_dataset_00) -preds_11 <- forest_samples$predict(forest_dataset_11) -preds_01 <- forest_samples$predict(forest_dataset_01) -preds_10 <- forest_samples$predict(forest_dataset_10) - -# Probability transformations -phat_00 <- pnorm(preds_00) -phat_11 <- pnorm(preds_11) -phat_01 <- pnorm(preds_01) -phat_10 <- pnorm(preds_10) - -# Cleanup -rm(preds_00) -rm(preds_11) -rm(preds_01) -rm(preds_10) - - -preds_ac <- forest_samples_f$predict(forest_dataset_grid) -phat_ac <- pnorm(preds_ac) - -preds_adj <- forest_samples_h$predict(forest_dataset_grid) -phat_a <- pnorm(preds_ac)*pnorm(preds_adj) -rm(preds_adj) -rm(preds_ac) - -phat_c <- phat_ac - phat_a - -phat_n <- 1 - phat_ac -``` - -Now we may plot posterior means of various quantities (as a function of $X$) to 
visualize how well the models are fitting. - -```{r plot4, fig.align='center'} -# Set up the plotting window -par(mfrow=c(1,2)) - -# Plot the fitted outcome probabilities against the truth -plot(p11_true,rowMeans(phat_11),pch=20,cex=0.5,bty='n') -abline(0,1,col='red') - -plot(p00_true,rowMeans(phat_00),pch=20,cex=0.5,bty='n') -abline(0,1,col='red') - -par(mfrow=c(1,3)) -plot(rowMeans(phat_ac),w_c +w_a,pch=20) -abline(0,1,col='red') - -plot(rowMeans(phat_a),w_a,pch=20) -abline(0,1,col='red') - -plot(rowMeans(phat_c),w_c,pch=20) -abline(0,1,col='red') -``` - -These plots are not as pretty as we might hope, but mostly this is a function of how difficult it is to learn conditional probabilities from binary outcomes. That we capture the trend broadly turns out to be adequate for estimating treatment effects. Fit does improve with simpler DGPs and larger training sets, as can be confirmed by experimentation with this script. - -Lastly, we can construct the estimate of the $ITT_c$ and compare it to the true value as well as the $Z=0$ and $Z=1$ complier average treatment effects (also called "local average treatment effects" or LATE). The key step in this process is to center our posterior on the identified interval (at each iteration of the sampler) at the value implied by a valid exclusion restriction. For some draws this will not be possible, as that value will be outside the identification region. 
- -```{r plot5, fig.height = 3} -# Generate draws from the posterior of the treatment effect -# centered at the point-identified value under the exclusion restriction -itt_c <- late <- matrix(NA,ngrid, ncol(phat_c)) -ss <- 6 -for (j in 1:ncol(phat_c)){ - - # Value of gamma11 implied by an exclusion restriction - gamest11 <- ((phat_a[,j] + phat_c[,j])/phat_c[,j])*phat_11[,j] - phat_10[,j]*phat_a[,j]/phat_c[,j] - - # Identified region for gamma11 - lower11 <- pmax(rep(0,ngrid), ((phat_a[,j] + phat_c[,j])/phat_c[,j])*phat_11[,j] - phat_a[,j]/phat_c[,j]) - upper11 <- pmin(rep(1,ngrid), ((phat_a[,j] + phat_c[,j])/phat_c[,j])*phat_11[,j]) - - # Center a beta distribution at gamma11, but restricted to (lower11, upper11) - # do this by shifting and scaling the mean, drawing from a beta on (0,1), then shifting and scaling to the - # correct restricted interval - m11 <- (gamest11 - lower11)/(upper11-lower11) - - # Parameters to the beta - a1 <- ss*m11 - b1 <- ss*(1-m11) - - # When the corresponding mean is out-of-range, sample from a beta with mass piled near the violated boundary - a1[m11<0] <- 1 - b1[m11<0] <- 5 - - a1[m11>1] <- 5 - b1[m11>1] <- 1 - - # Value of gamma00 implied by an exclusion restriction - gamest00 <- ((phat_n[,j] + phat_c[,j])/phat_c[,j])*phat_00[,j] - phat_01[,j]*phat_n[,j]/phat_c[,j] - - # Identified region for gamma00 - lower00 <- pmax(rep(0,ngrid), ((phat_n[,j] + phat_c[,j])/phat_c[,j])*phat_00[,j] - phat_n[,j]/phat_c[,j]) - upper00 <- pmin(rep(1,ngrid), ((phat_n[,j] + phat_c[,j])/phat_c[,j])*phat_00[,j]) - - # Center a beta distribution at gamma00, but restricted to (lower00, upper00) - # do this by shifting and scaling the mean, drawing from a beta on (0,1), then shifting and scaling to the - # correct restricted interval - m00 <- (gamest00 - lower00)/(upper00-lower00) - - a0 <- ss*m00 - b0 <- ss*(1-m00) - - a0[m00<0] <- 1 - b0[m00<0] <- 5 - - a0[m00>1] <- 5 - b0[m00>1] <- 1 - - # ITT and LATE - itt_c[,j] <- lower11 + (upper11 - 
lower11)*rbeta(ngrid,a1,b1) - (lower00 + (upper00 - lower00)*rbeta(ngrid,a0,b0)) - late[,j] <- gamest11 - gamest00 -} - -upperq <- apply(itt_c,1,quantile,0.975) -lowerq <- apply(itt_c,1,quantile,0.025) -``` - -And now we can plot all of this, using the "polygon" function to shade posterior quantiles. - -```{r plot6, fig.align="center"} -par(mfrow=c(1,1)) -plot(xgrid,itt_c_true,pch=20,cex=0.5,ylim=c(-0.75,0.05),bty='n',type='n',xlab='x',ylab='Treatment effect') - -upperq_er <- apply(late,1,quantile,0.975,na.rm=TRUE) - -lowerq_er <- apply(late,1,quantile,0.025,na.rm=TRUE) - -polygon(c(xgrid,rev(xgrid)),c(lowerq,rev(upperq)),col=rgb(0.5,0.25,0,0.25),pch=20,border=FALSE) -polygon(c(xgrid,rev(xgrid)),c(lowerq_er,rev(upperq_er)),col=rgb(0,0,0.5,0.25),pch=20,border=FALSE) - -itt_c_est <- rowMeans(itt_c) -late_est <- rowMeans(late) -lines(xgrid,late_est,col="slategray",lwd=3) - -lines(xgrid,itt_c_est,col='goldenrod1',lwd=1) - -lines(xgrid,LATE_true0,col="black",lwd=2,lty=3) -lines(xgrid,LATE_true1,col="black",lwd=2,lty=2) - -lines(xgrid,itt_c_true,col="black",lwd=1) -``` - -With a valid exclusion restriction the three black curves would all be the same. With no exclusion restriction, as we have here, the direct effect of $Z$ on $Y$ (the vaccine reminder on flu status) makes it so these three treatment effects are different. Specifically, the $ITT_c$ compares getting the vaccine *and* getting the reminder to not getting the vaccine *and* not getting the reminder. When both things have risk reducing impacts, we see a larger risk reduction over all values of $X$. Meanwhile, the two LATE effects compare the isolated impact of the vaccine among people that got the reminder and those that didn't, respectively. Here, not getting the reminder makes the vaccine more effective because the risk reduction is as a fraction of baseline risk, and the reminder reduces baseline risk in our DGP. 
- -We see also that the posterior mean of the $ITT_c$ estimate (gold) is very similar to the posterior mean under the assumption of an exclusion restriction (gray). This is by design...they will only deviate due to Monte Carlo variation or due to the rare situations where the exclusion restriction is incompatible with the identification interval. - -By changing the sample size and various aspects of the DGP this script allows us to build some intuition for how aspects of the DGP affect posterior inferences, particularly how violates of assumptions affect accuracy and posterior uncertainty. - -# References diff --git a/vignettes/R/IV/iv.bib b/vignettes/R/IV/iv.bib deleted file mode 100644 index b0eba609b..000000000 --- a/vignettes/R/IV/iv.bib +++ /dev/null @@ -1,79 +0,0 @@ -@article{mcdonald1992effects, - title={Effects of computer reminders for influenza vaccination on morbidity during influenza epidemics.}, - author={McDonald, Clement J and Hui, Siu L and Tierney, William M}, - journal={MD computing: computers in medical practice}, - volume={9}, - number={5}, - pages={304--312}, - year={1992} -} - -@article{hirano2000assessing, - author = {Hirano, Keisuke and Imbens, Guido W. and Rubin, Donald B. and Zhou, Xiao-Hua}, - title = {Assessing the effect of an influenza vaccine in an - encouragement design }, - journal = {Biostatistics}, - volume = {1}, - number = {1}, - pages = {69-88}, - year = {2000}, - month = {03}, - issn = {1465-4644}, - doi = {10.1093/biostatistics/1.1.69}, - url = {https://doi.org/10.1093/biostatistics/1.1.69}, - eprint = {https://academic.oup.com/biostatistics/article-pdf/1/1/69/17744019/100069.pdf}, -} - -@incollection{richardson2011transparent, - author = {Richardson, Thomas S. and Evans, Robin J. 
and Robins, James M.}, - isbn = {9780199694587}, - title = {Transparent Parametrizations of Models for Potential Outcomes}, - booktitle = {Bayesian Statistics 9}, - publisher = {Oxford University Press}, - year = {2011}, - month = {10}, - doi = {10.1093/acprof:oso/9780199694587.003.0019}, - url = {https://doi.org/10.1093/acprof:oso/9780199694587.003.0019}, - eprint = {https://academic.oup.com/book/0/chapter/141661815/chapter-ag-pdf/45787772/book\_1879\_section\_141661815.ag.pdf}, -} - -@book{imbens2015causal, - place={Cambridge}, - title={Causal Inference for Statistics, Social, and Biomedical Sciences: An Introduction}, - publisher={Cambridge University Press}, - author={Imbens, Guido W. and Rubin, Donald B.}, - year={2015} -} - -@article{hahn2016bayesian, - title={A Bayesian partial identification approach to inferring the prevalence of accounting misconduct}, - author={Hahn, P Richard and Murray, Jared S and Manolopoulou, Ioanna}, - journal={Journal of the American Statistical Association}, - volume={111}, - number={513}, - pages={14--26}, - year={2016}, - publisher={Taylor \& Francis} -} - -@article{albert1993bayesian, - title={Bayesian analysis of binary and polychotomous response data}, - author={Albert, James H and Chib, Siddhartha}, - journal={Journal of the American statistical Association}, - volume={88}, - number={422}, - pages={669--679}, - year={1993}, - publisher={Taylor \& Francis} -} - -@article{papakostas2023forecasts, - title={Do forecasts of bankruptcy cause bankruptcy? 
A machine learning sensitivity analysis}, - author={Papakostas, Demetrios and Hahn, P Richard and Murray, Jared and Zhou, Frank and Gerakos, Joseph}, - journal={The Annals of Applied Statistics}, - volume={17}, - number={1}, - pages={711--739}, - year={2023}, - publisher={Institute of Mathematical Statistics} -} \ No newline at end of file diff --git a/vignettes/R/IV/iv.html b/vignettes/R/IV/iv.html deleted file mode 100644 index 98de3c632..000000000 --- a/vignettes/R/IV/iv.html +++ /dev/null @@ -1,1795 +0,0 @@ - - - - - - - - - - - - - - - - -Instrumental Variables (IV) with Stochtree - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - - -
-

Introduction

-

Here we consider a causal inference problem with a binary treatment -and a binary outcome where there is unobserved confounding, but an -exogenous instrument is available (also binary). This problem will -require a number of extensions to the basic BART model, all of which can -be implemented straightforwardly as Gibbs samplers using -stochtree. We’ll go through all of the model fitting steps -in quite a lot of detail here.

-
-
-

Background

-

To be concrete, suppose we wish to measure the effect of receiving a -flu vaccine on the probability of getting the flu. Individuals who opt -to get a flu shot differ in many ways from those that don’t, and these -lifestyle differences presumably also affect their respective chances of -getting the flu. Consequently, comparing the percentage of individuals -who get the flu in the vaccinated and unvaccinated groups does not give -a clear picture of the vaccine efficacy.

-

However, a so-called encouragement design can be implemented, where -some individuals are selected at random to be given some extra incentive -to get a flu shot (free clinics at the workplace or a personalized -reminder, for example). Studying the impact of this randomized -encouragement allows us to tease apart the impact of the vaccine from -the confounding factors, at least to some extent. This exact problem has -been considered several times in the literature, starting with McDonald, Hui, and Tierney (1992) with follow-on -analysis by Hirano et al. (2000), Richardson, Evans, and Robins (2011), and Imbens and Rubin (2015).

-

Our analysis here follows the Bayesian nonparametric approach -described in the supplement to Hahn, Murray, and -Manolopoulou (2016).

-
-

Notation

-

Let \(V\) denote the treatment -variable (as in “vaccine”). Let \(Y\) -denote the response variable (getting the flu), \(Z\) denote the instrument (encouragement or -reminder to get a flu shot), and \(X\) -denote an additional observable covariate (for instance, patient -age).

-

Further, let \(S\) denote the -so-called principal strata, which is an exhaustive -characterization of how individuals’ might be affected by the -encouragement regarding the flu shot. Some people will get a flu shot no -matter what: these are the always takers (a). Some people will -not get the flu shot no matter what: these are the never takers -(n). For both always-takers and never-takers, the randomization of the -encouragement is irrelevant and our data set contains no always takers -who skipped the vaccine and no never takers who got the vaccine and so -the treatment effect of the vaccine in these groups is fundamentally -non-identifiable.

-

By contrast, we also have compliers (c): folks who would not -have gotten the shot but for the fact that they were encouraged to do -so. These are the people about whom our randomized encouragement -provides some information, because they are precisely the ones that have -been randomized to treatment.

-

Lastly, we could have defiers (d): contrarians who who were -planning on getting the shot, but – upon being reminded – decided not -to! For our analysis we will do the usual thing of assuming that there -are no defiers. And because we are going to simulate our data, we can -make sure that this assumption is true.

-
-
-

The causal diagram

-

The causal diagram for this model can be expressed as follows. Here -we are considering one confounder and moderator variable (\(X\)), which is the patient’s age. In our -data generating process (which we know because this is a simulation -demonstration) higher age will make it more likely that a person is an -always taker or complier and less likely that they are a never taker, -which in turn has an effect on flu risk. We stipulate here that always -takers are at lower risk and never takers at higher risk. -Simultaneously, age has an increasing and then decreasing direct effect -on flu risk; very young and very old are at higher risk, while young and -middle age adults are at lower risk. In this DGP the flu efficacy has a -multiplicative effect, reducing flu risk as a fixed proportion of -baseline risk – accordingly, the treatment effect (as a difference) is -nonlinear in Age (for each principal stratum).

-
-The causal directed acyclic graph (CDAG) for the instrumental variables flu example. -

-The causal directed acyclic graph (CDAG) for the instrumental variables -flu example. -

-
-

The biggest question about this graph concerns the dashed red arrow -from the putative instrument \(Z\) to -the outcome (flu). We say “putative” because if that dashed red arrow is -there, then technically \(Z\) is not a -valid instrument. The assumption/assertion that there is no dashed red -arrow is called the “exclusion restriction”. In this vignette, we will -explore what sorts of inferences are possible if we remain agnostic -about the presence or absence of that dashed red arrow.

-
-
-

Potential outcomes

-

There are two relevant potential outcomes in an instrumental -variables analysis, corresponding to the causal effect of the instrument -on the treatment and the causal effect of the treatment on the outcome. -In this example, that is the effect of the reminder/encouragement on -vaccine status and the effect of the vaccine itself on the flu. The -notation is \(V(Z)\) and \(Y(V(Z),Z)\) respectively, so that we have -six distinct random variables: \(V(0)\), \(V(1)\), \(Y(0,0)\), \(Y(1,0)\), \(Y(0,1)\) and \(Y(1,1)\). The problem – sometimes called -the fundamental problem of causal inference – is that some of -these random variables can never be seen simultaneously, they are -observationally mutually exclusive. For this reason, it may be helpful -to think about causal inference as a missing data problem, as depicted -in the following table.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-\(i\) - -\(Z_i\) - -\(V_i(0)\) - -\(V_i(1)\) - -\(Y_i(0,0)\) - -\(Y_i(1,0)\) - -\(Y_i(0,1)\) - -\(Y_i(1,1)\) -
-1 - -1 - -? - -1 - -? - -? - -? - -0 -
-2 - -0 - -1 - -? - -? - -1 - -? - -? -
-3 - -0 - -0 - -? - -1 - -? - -? - -? -
-4 - -1 - -? - -0 - -? - -? - -0 - -? -
-\(\vdots\) - -\(\vdots\) - -\(\vdots\) - -\(\vdots\) - -\(\vdots\) - -\(\vdots\) - -\(\vdots\) - -\(\vdots\) -
-

Likewise, with this notation we can formally define the principal -strata:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-\(V_i(0)\) - -\(V_i(1)\) - -\(S_i\) -
-0 - -0 - -Never Taker (n) -
-1 - -1 - -Always Taker (a) -
-0 - -1 - -Complier (c) -
-1 - -0 - -Defier (d) -
-
-
-

Estimands and Identification

-

Let \(\pi_s(x)\) denote the -conditional (on \(x\)) probability that -an individual belongs to principal stratum \(s\): \[\begin{equation} -\pi_s(x)=\operatorname{Pr}(S=s \mid X=x), -\end{equation}\] and let \(\gamma_s^{v -z}(x)\) denote the potential outcome probability for given values -\(v\) and \(z\): \[\begin{equation} -\gamma_s^{v z}(x)=\operatorname{Pr}(Y(v, z)=1 \mid S=s, X=x). -\end{equation}\]

-

Various estimands of interest may be expressed in terms of the -functions \(\gamma_c^{vz}(x)\). In -particular, the complier conditional average treatment effect \[\gamma_c^{1,z}(x) - \gamma_c^{0,z}(x)\] is -the ultimate goal (for either \(z=0\) -or \(z=1\)). Under an exclusion -restriction, we would have \(\gamma_s^{vz}(x) -= \gamma_s^{v}(x)\) and the reminder status \(z\) itself would not matter. In that case, -we can estimate \[\gamma_c^{1,z}(x) - -\gamma_c^{0,z}\] and \[\gamma_c^{1,1}(x) - \gamma_c^{0,0}(x).\] -This latter quantity is called the complier intent-to-treat effect, or -\(ITT_c\), and it can be partially -identify even if the exclusion restriction is violated, as follows.

-

The left-hand side of the following system of equations are all -estimable quantities that can be learned from observable data, while the -right hand side expressions involve the unknown functions of interest, -\(\gamma_s^{vz}(x)\):

-

\[\begin{equation} -\begin{aligned} -p_{1 \mid 00}(x) = \operatorname{Pr}(Y=1 \mid V=0, Z=0, -X=x)=\frac{\pi_c(x)}{\pi_c(x)+\pi_n(x)} -\gamma_c^{00}(x)+\frac{\pi_n(x)}{\pi_c(x)+\pi_n(x)} \gamma_n^{00}(x) \\ -p_{1 \mid 11}(x) =\operatorname{Pr}(Y=1 \mid V=1, Z=1, -X=x)=\frac{\pi_c(x)}{\pi_c(x)+\pi_a(x)} -\gamma_c^{11}(x)+\frac{\pi_a(x)}{\pi_c(x)+\pi_a(x)} \gamma_a^{11}(x) \\ -p_{1 \mid 01}(x) =\operatorname{Pr}(Y=1 \mid V=0, Z=1, -X=x)=\frac{\pi_d(x)}{\pi_d(x)+\pi_n(x)} -\gamma_d^{01}(x)+\frac{\pi_n(x)}{\pi_d(x)+\pi_n(x)} \gamma_n^{01}(x) \\ -p_{1 \mid 10}(x) =\operatorname{Pr}(Y=1 \mid V=1, Z=0, -X=x)=\frac{\pi_d(x)}{\pi_d(x)+\pi_a(x)} -\gamma_d^{10}(x)+\frac{\pi_a(x)}{\pi_d(x)+\pi_a(x)} \gamma_a^{10}(x) -\end{aligned} -\end{equation}\]

-

Furthermore, we have \[\begin{equation} -\begin{aligned} -\operatorname{Pr}(V=1 \mid Z=0, X=x)&=\pi_a(x)+\pi_d(x)\\ -\operatorname{Pr}(V=1 \mid Z=1, X=x)&=\pi_a(x)+\pi_c(x) -\end{aligned} -\end{equation}\]

-

Under the monotonicity assumption

-

The exclusion restriction would dictate that \(\gamma_s^{01}(x) = \gamma_s^{00}(x)\) and -\(\gamma_s^{11}(x) = \gamma_s^{10}(x)\) -for all \(s\). This has two -implications. One, \(\gamma_n^{01}(x) = -\gamma_n^{00}(x)\) and \(\gamma_a^{10}(x) = \gamma_a^{11}(x)\),and -because the left-hand terms are identified, this permits \(\gamma_c^{11}(x)\) and \(\gamma_c^{00}(x)\) to be solved for by -substitution. Two, with these two quantities solved for, we also have -the two other quantities (the different settings of \(z\)), since \(\gamma_c^{11}(x) = \gamma_c^{10}(x)\) and -\(\gamma_c^{00}(x) = -\gamma_c^{01}(x)\). Consequently, both of our estimands from -above can be estimated:

-

\[\gamma_c^{11}(x) - -\gamma_c^{01}(x)\] and

-

\[\gamma_c^{10}(x) - -\gamma_c^{00}(x)\] because they are both (supposing the exclusion -restriction holds) the same as

-

\[\gamma_c^{11}(x) - -\gamma_c^{00}(x).\] If the exclusion restriction does -not hold, then the three above treatment effects are all -(potentially) distinct and not much can be said about the former two. -The latter one, the \(ITT_c\), however, -can be partially identified, by recognizing that the first two equations -(in our four equation system) provide non-trivial bounds based on the -fact that while \(\gamma_c^{11}(x)\) -and \(\gamma_c^{00}(x)\) are no longer -identified, as probabilities both must lie between 0 and 1. Thus,

-

\[\begin{equation} -\begin{aligned} - \max\left( - 0, \frac{\pi_c(x)+\pi_n(x)}{\pi_c(x)}p_{1\mid 00}(x) - -\frac{\pi_n(x)}{\pi_c(x)} - \right) -&\leq\gamma^{00}_c(x)\leq - \min\left( - 1, \frac{\pi_c(x)+\pi_n(x)}{\pi_c(x)}p_{1\mid 00}(x) - \right)\\\\ -% -\max\left( - 0, \frac{\pi_a(x)+\pi_c(x)}{\pi_c(x)}p_{1\mid 11}(x) - -\frac{\pi_a(x)}{\pi_c(x)} -\right) -&\leq\gamma^{11}_c(x)\leq -\min\left( - 1, \frac{\pi_a(x)+\pi_c(x)}{\pi_c(x)}p_{1\mid 11}(x) -\right) -\end{aligned} -\end{equation}\]

-

The point of all this is that the data (plus a no-defiers assumption) -lets us estimate all the necessary inputs to these upper and lower -bounds on \(\gamma^{11}_c(x)\) and -\(\gamma^{00}_c(x)\) which in turn -define our estimand. What remains is to estimate those inputs, as -functions of \(x\), and to do so while -enforcing the monotonicty restriction \[\operatorname{Pr}(V=1 \mid Z=0, X=x)=\pi_a(x) -\leq -\operatorname{Pr}(V=1 \mid Z=1, X=x)=\pi_a(x)+\pi_c(x).\]

-

We can do all of this with calls to stochtree from R (or Python). But -first, let’s generate some test data.

-
-

Simulate the data

-

Start with some initial setup / housekeeping

-
library(stochtree)
-
-# size of the training sample
-n <- 20000
-
-# To set the seed for reproducibility/illustration purposes, replace "NULL" with a positive integer
-random_seed <- NULL
-

First, we generate the instrument exogenously

-
z <- rbinom(n, 1, 0.5)
-

Next, we generate the covariate. (For this example, let’s think of it -as patient age, although we are generating it from a uniform -distribution between 0 and 3, so you have to imagine that it has been -pre-standardized to this scale. It keeps the DGPs cleaner for -illustration purposes.)

-
p_X <- 1
-X <- matrix(runif(n*p_X, 0, 3), ncol = p_X)
-x <- X[,1] # for ease of reference later
-

Next, we generate the principal strata \(S\) based on the observed value of \(X\). We generate it according to a logistic -regression with two coefficients per strata, an intercept and a slope. -Here, these coefficients are set so that the probability of being a -never taker decreases with age.

-
alpha_a <- 0
-beta_a <- 1
-
-alpha_n <- 1
-beta_n <- -1
-
-alpha_c <- 1
-beta_c <- 1
-
-# define function (a logistic model) to generate Pr(S = s | X = x)
-pi_s <- function(xval){
-  
-  w_a <- exp(alpha_a + beta_a*xval)
-  w_n <- exp(alpha_n + beta_n*xval)
-  w_c <- exp(alpha_c + beta_c*xval)
-   
-  w <- cbind(w_a, w_n, w_c)
-  colnames(w) <- c("w_a","w_n","w_c")
-  w <- w/rowSums(w)
-  
-  return(w)
-  
-}
-s <- sapply(1:n, function(j) sample(c("a","n","c"), 1, prob = pi_s(X[j,1])))
-

Next, we generate the treatment variable, here denoted \(V\) (for “vaccine”), as a -deterministic function of \(S\) and \(Z\); this is what gives the principal -strata their meaning.

-
v <- 1*(s=="a") + 0*(s=="n") + z*(s=="c") + (1-z)*(s == "d")
-

Finally, the outcome structural model is specified, based on which -the outcome is sampled. By varying this function in particular ways, we -can alter the identification conditions.

-
gamfun <- function(xval,vval, zval,sval){
-  
-  # if this function depends on zval, then exclusion restriction is violated
-  # if this function does not depend on sval, then IV analysis wasn't necessary
-  # if this function does not depend on x, then there are no HTEs
-  
-  baseline <- pnorm(2 -1*xval - 2.5*(xval-1.5)^2 - 0.5*zval + 1*(sval=="n") - 1*(sval=="a") )
-  prob <- baseline - 0.5*vval*baseline # 0.5*vval*baseline
-  
-  return(prob)
-}
-
-# Generate the observed outcome
-y <- rbinom(n, 1, gamfun(X[,1],v,z,s))
-

Lastly, we perform some organization for our supervised learning -algorithms later on.

-
# Concatenate X, v and z for our supervised learning algorithms
-Xall <- cbind(X,v,z)
-
-# update the size of "X" to be the size of Xall
-p_X <- p_X + 2
-
-# For the monotone probit model it is necessary to sort the observations so that the Z=1 cases are all together
-# at the start of the outcome vector.  
-index <- sort(z,decreasing = TRUE, index.return = TRUE)
-
-X <- matrix(X[index$ix,],ncol= 1)
-Xall <- Xall[index$ix,]
-z <- z[index$ix]
-v <- v[index$ix]
-s <- s[index$ix]
-y <- y[index$ix]
-x <- x[index$ix]
-

Now let’s see if we can recover these functions from the observed -data.

-
-
-

Fit the outcome model

-

We have to fit three models here, the treatment models: \(\operatorname{Pr}(V = 1 | Z = 1, X=x)\) and -\(\operatorname{Pr}(V = 1 | Z = 0,X = -x)\), subject to the monotonicity constraint \(\operatorname{Pr}(V = 1 | Z = 1, X=x) \geq -\operatorname{Pr}(V = 1 | Z = 0,X = x)\), and an outcome model -\(\operatorname{Pr}(Y = 1 | Z = 1, V = 1, X = -x)\). All of this will be done with stochtree.

-

The outcome model is fit with a single (S-learner) BART model. This -part of the model could be fit as a T-Learner or as a BCF model. Here we -us an S-Learner for simplicity. Both models are probit models, and use -the well-known Albert and Chib (1993) data -augmentation Gibbs sampler. This section covers the more straightforward -outcome model. The next section describes how the monotonicity -constraint is handled with a data augmentation Gibbs sampler.

-

These models could (and probably should) be wrapped as functions. -Here they are implemented as scripts, with the full loops shown. The -output – at the end of the loops – are stochtree forest objects from -which we can extract posterior samples and generate predictions. In -particular, the \(ITT_c\) will be -constructed using posterior counterfactual predictions derived from -these forest objects.

-

We begin by setting a bunch of hyperparameters and instantiating the -forest objects to be operated upon in the main sampling loop. We also -initialize the latent variables.

-
# Fit the BART model for Pr(Y = 1 | Z = 1, V = 1, X = x)
-
-# Set number of iterations
-num_warmstart <- 10
-num_mcmc <- 1000
-num_samples <- num_warmstart + num_mcmc
-
-# Set a bunch of hyperparameters. These are ballpark default values.
-alpha <- 0.95
-beta <- 2
-min_samples_leaf <- 1
-max_depth <- 20
-num_trees <- 50
-cutpoint_grid_size = 100
-global_variance_init = 1.
-tau_init = 0.5
-leaf_prior_scale = matrix(c(tau_init), ncol = 1)
-a_leaf <- 2.
-b_leaf <- 0.5
-leaf_regression <- F
-feature_types <- as.integer(c(rep(0, p_X-2),1,1)) # 0 = numeric
-var_weights <- rep(1,p_X)/p_X
-outcome_model_type <- 0
-
-# C++ dataset
-forest_dataset <- createForestDataset(Xall)
-
-# Random number generator (std::mt19937)
-if (is.null(random_seed)) {
-    rng <- createCppRNG(-1)
-} else {
-    rng <- createCppRNG(random_seed)
-}
-
-# Sampling data structures
-forest_model_config <- createForestModelConfig(
-  feature_types = feature_types, num_trees = num_trees, num_features = p_X, 
-  num_observations = n, variable_weights = var_weights, leaf_dimension = 1, 
-  alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, 
-  max_depth = max_depth, leaf_model_type = outcome_model_type, 
-  leaf_model_scale = leaf_prior_scale, cutpoint_grid_size = cutpoint_grid_size
-)
-global_model_config <- createGlobalModelConfig(global_error_variance = 1)
-forest_model <- createForestModel(forest_dataset, forest_model_config, global_model_config)
-
-# Container of forest samples
-forest_samples <- createForestSamples(num_trees, 1, T, F)
-
-# "Active" forest state
-active_forest <- createForest(num_trees, 1, T, F)
-
-# Initialize the latent outcome zed
-n1 <- sum(y)
-zed <- 0.25*(2*as.numeric(y) - 1)
-
-# C++ outcome variable
-outcome <- createOutcome(zed)
-
-# Initialize the active forest and subtract each root tree's predictions from outcome
-active_forest$prepare_for_sampler(forest_dataset, outcome, forest_model, outcome_model_type, 0.0)
-active_forest$adjust_residual(forest_dataset, outcome, forest_model, FALSE, FALSE)
-

Now we enter the main loop, which involves only two steps: sample the -forest, given the latent utilities, then sample the latent utilities -given the estimated conditional means defined by the forest and its -parameters.

-
# Initialize the Markov chain with num_warmstart grow-from-root iterations
-gfr_flag <- T
-for (i in 1:num_samples) {
-  
-  # The first num_warmstart iterations use the grow-from-root algorithm of He and Hahn
-  if (i > num_warmstart){
-    gfr_flag <- F
-  } 
-  
-  # Sample forest
-  forest_model$sample_one_iteration(
-    forest_dataset, outcome, forest_samples, active_forest, 
-    rng, forest_model_config, global_model_config, 
-    keep_forest = T, gfr = gfr_flag
-  )
-  
-  # Get the current means
-  eta <- forest_samples$predict_raw_single_forest(forest_dataset, i-1)
-  
-  # Sample latent normals, truncated according to the observed outcome y
-  U1 <- runif(n1,pnorm(0,eta[y==1],1),1)
-  zed[y==1] <- qnorm(U1,eta[y==1],1)
-  U0 <- runif(n - n1,0, pnorm(0,eta[y==0],1))
-  zed[y==0] <- qnorm(U0,eta[y==0],1)
-  
-  # Propagate the newly sampled latent outcome to the BART model
-  outcome$update_data(zed)
-  forest_model$propagate_residual_update(outcome)
-}
-
-
-

Fit the monotone probit model(s)

-

The monotonicity constraint relies

-

\[\begin{equation} -\begin{aligned} -\operatorname{Pr}(V=1 \mid Z=0, X=x)&=\pi_a(x) = -\Phi_f(x)\Phi_h(x),\\ -\operatorname{Pr}(V=1 \mid Z=1, X=x)&=\pi_a(x)+\pi_c(x) = \Phi_f(x), -\end{aligned} -\end{equation}\] where \(\Phi_{\mu}(x)\) denotes the normal -cumulative distribution function with mean \(\mu(x)\) and variance 1.

-

We first create a secondary data matrix for the \(Z=0\) group only. We also set all of the -hyperparameters and initialize the latent variables.

-
# Fit the monotone probit model to the treatment such that Pr(V = 1 | Z = 1, X=x) >= Pr(V = 1 | Z = 0,X = x) 
-
-X_h <- as.matrix(X[z==0,])
-n0 <- sum(z==0)
-n1 <- sum(z==1)
-
-num_trees_f <- 50
-num_trees_h <- 20
-feature_types <- as.integer(rep(0, 1)) # 0 = numeric
-var_weights <- rep(1,1)
-cutpoint_grid_size = 100
-global_variance_init = 1.
-tau_init = 1/num_trees_h
-leaf_prior_scale = matrix(c(tau_init), ncol = 1)
-nu <- 4
-lambda <- 0.5
-a_leaf <- 2.
-b_leaf <- 0.5
-leaf_regression <- F # fit a constant leaf mean BART model
-
-# Instantiate the C++ dataset objects
-forest_dataset_f <- createForestDataset(X)
-forest_dataset_h <- createForestDataset(X_h)
-
-# Tell it we're fitting a normal BART model
-outcome_model_type <- 0
-
-# Set up model configuration objects
-forest_model_config_f <- createForestModelConfig(
-  feature_types = feature_types, num_trees = num_trees_f, num_features = ncol(X), 
-  num_observations = nrow(X), variable_weights = var_weights, leaf_dimension = 1, 
-  alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, 
-  max_depth = max_depth, leaf_model_type = outcome_model_type, 
-  leaf_model_scale = leaf_prior_scale, cutpoint_grid_size = cutpoint_grid_size
-)
-forest_model_config_h <- createForestModelConfig(
-  feature_types = feature_types, num_trees = num_trees_h, num_features = ncol(X_h), 
-  num_observations = nrow(X_h), variable_weights = var_weights, leaf_dimension = 1, 
-  alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, 
-  max_depth = max_depth, leaf_model_type = outcome_model_type, 
-  leaf_model_scale = leaf_prior_scale, cutpoint_grid_size = cutpoint_grid_size
-)
-global_model_config <- createGlobalModelConfig(global_error_variance = 1)
-
-# Instantiate the sampling data structures
-forest_model_f <- createForestModel(forest_dataset_f, forest_model_config_f, global_model_config)
-forest_model_h <- createForestModel(forest_dataset_h, forest_model_config_h, global_model_config)
-
-# Instantiate containers of forest samples
-forest_samples_f <- createForestSamples(num_trees_f, 1, T)
-forest_samples_h <- createForestSamples(num_trees_h, 1, T)
-
-# Instantiate "active" forests
-active_forest_f <- createForest(num_trees_f, 1, T)
-active_forest_h <- createForest(num_trees_h, 1, T)
-
-# Set algorithm specifications 
-# these are set in the earlier script for the outcome model; number of draws needs to be commensurable 
-
-#num_warmstart <- 10
-#num_mcmc <- 2000
-#num_samples <- num_warmstart + num_mcmc
-
-# Initialize the Markov chain
-
-# Initialize (R0, R1), the latent binary variables that enforce the monotonicty 
-
-v1 <- v[z==1]
-v0 <- v[z==0]
-
-R1 = rep(NA,n0)
-R0 = rep(NA,n0)
-
-R1[v0==1] <- 1
-R0[v0==1] <- 1
-
-R1[v0 == 0] <- 0
-R0[v0 == 0] <- sample(c(0,1),sum(v0==0),replace=TRUE)
-
-# The first n1 observations of vaug are actually observed
-# The next n0 of them are the latent variable R1
-vaug <- c(v1, R1)
-
-# Initialize the Albert and Chib latent Gaussian variables
-z_f <- (2*as.numeric(vaug) - 1)
-z_h <- (2*as.numeric(R0)-1)
-z_f <- z_f/sd(z_f)
-z_h <- z_h/sd(z_h)
-
-# Pass these variables to the BART models as outcome variables
-outcome_f <- createOutcome(z_f)
-outcome_h <- createOutcome(z_h)
-
-# Initialize active forests to constant (0) predictions
-active_forest_f$prepare_for_sampler(forest_dataset_f, outcome_f, forest_model_f, outcome_model_type, 0.0)
-active_forest_h$prepare_for_sampler(forest_dataset_h, outcome_h, forest_model_h, outcome_model_type, 0.0)
-active_forest_f$adjust_residual(forest_dataset_f, outcome_f, forest_model_f, FALSE, FALSE)
-active_forest_h$adjust_residual(forest_dataset_h, outcome_h, forest_model_h, FALSE, FALSE)
-

Now we run the main sampling loop, which consists of three key steps: -sample the BART forests, given the latent probit utilities, sampling the -latent binary outcome pairs (this is the step that is necessary for -enforcing monotonicity), given the forest predictions and the latent -utilities, and finally sample the latent utilities.

-
# PART IV: run the Markov chain 
-
-# Initialize the Markov chain with num_warmstart grow-from-root iterations
-gfr_flag <- T
-for (i in 1:num_samples) {
-  
-  # Switch over to random walk Metropolis-Hastings tree updates after num_warmstart
-  if (i > num_warmstart) {
-    gfr_flag <- F
-  }
-  
-  # Step 1: Sample the BART forests
-  
-  # Sample forest for the function f based on (y_f, R1)
-  forest_model_f$sample_one_iteration(
-    forest_dataset_f, outcome_f, forest_samples_f, active_forest_f, 
-    rng, forest_model_config_f, global_model_config, 
-    keep_forest = T, gfr = gfr_flag
-  )
-  
-  # Sample forest for the function h based on outcome R0
-  forest_model_h$sample_one_iteration(
-    forest_dataset_h, outcome_h, forest_samples_h, active_forest_h,
-    rng, forest_model_config_h, global_model_config, 
-    keep_forest = T, gfr = gfr_flag
-  )
-  
-  # Extract the means for use in sampling the latent variables
-  eta_f <- forest_samples_f$predict_raw_single_forest(forest_dataset_f, i-1)
-  eta_h <- forest_samples_h$predict_raw_single_forest(forest_dataset_h, i-1)
-  
-  
-  # Step 2: sample the latent binary pair (R0, R1) given eta_h, eta_f, and y_g
-  
-  # Three cases: (0,0), (0,1), (1,0)
-  w1 <- (1 - pnorm(eta_h[v0==0]))*(1-pnorm(eta_f[n1 + which(v0==0)]))
-  w2 <-   (1 - pnorm(eta_h[v0==0]))*pnorm(eta_f[n1 + which(v0==0)])
-  w3 <- pnorm(eta_h[v0==0])*(1 - pnorm(eta_f[n1 + which(v0==0)]))
-  
-  s <- w1 + w2 + w3
-  w1 <- w1/s
-  w2 <- w2/s
-  w3 <- w3/s
-  
-  u <- runif(sum(v0==0))
-  temp <- 1*(u < w1) + 2*(u > w1 & u < w1 + w2) + 3*(u > w1 + w2)
-  
-  R1[v0==0] <- 1*(temp==2)
-  R0[v0==0] <- 1*(temp==3)
-  
-  # Redefine y with the updated R1 component 
-  vaug <- c(v1, R1)
-  
-  # Step 3: sample the latent normals, given (R0, R1) and y_f
-  
-  # First z0
-  U1 <- runif(sum(R0),pnorm(0, eta_h[R0==1],1),1)
-  z_h[R0==1] <- qnorm(U1, eta_h[R0==1],1)
-  
-  U0 <- runif(n0 - sum(R0),0, pnorm(0, eta_h[R0==0],1))
-  z_h[R0==0] <- qnorm(U0, eta_h[R0==0],1)
-  
-  # Then z1
-  U1 <- runif(sum(vaug),pnorm(0, eta_f[vaug==1],1),1)
-  z_f[vaug==1] <- qnorm(U1, eta_f[vaug==1],1)
-  
-  U0 <- runif(n - sum(vaug),0, pnorm(0, eta_f[vaug==0],1))
-  z_f[vaug==0] <- qnorm(U0, eta_f[vaug==0],1)
-  
-  # Propagate the updated outcomes through the BART models
-  outcome_h$update_data(z_h)
-  forest_model_h$propagate_residual_update(outcome_h)
-  
-  outcome_f$update_data(z_f)
-  forest_model_f$propagate_residual_update(outcome_f)
-  
-  # No more steps, just repeat a bunch of times
-}
-
-
-

Extracting the estimates and plotting the results.

-

Now for the most interesting part, which is taking the stochtree BART -model fits and producing the causal estimates of interest.

-

First we set up our grid for plotting the functions in \(X\). This is possible in this example -because the moderator, age, is one dimensional; in may applied problems -this will not be the case and visualization will be substantially -trickier.

-
# Extract the credible intervals for the conditional treatment effects as a function of x.
-# We use a grid of values for plotting, with grid points that are typically fewer than the number of observations.
-
-ngrid <- 200
-xgrid <- seq(0.1,2.5,length.out = ngrid)
-X_11 <- cbind(xgrid,rep(1,ngrid),rep(1,ngrid))
-
-X_00 <- cbind(xgrid,rep(0,ngrid),rep(0,ngrid))
-X_01 <- cbind(xgrid,rep(0,ngrid),rep(1,ngrid))
-X_10 <- cbind(xgrid,rep(1,ngrid),rep(0,ngrid))
-

Next, we compute the truth function evaluations on this plotting -grid, using the functions defined above when we generated our data.

-
# Compute the true conditional outcome probabilities for plotting
-pi_strat <- pi_s(xgrid)
-w_a <- pi_strat[,1]
-w_n <- pi_strat[,2]
-w_c <- pi_strat[,3]
-
-w <- (w_c/(w_a + w_c))
-
-p11_true <- w*gamfun(xgrid,1,1,"c") + (1-w)*gamfun(xgrid,1,1,"a")
-
-w <- (w_c/(w_n + w_c))
-
-p00_true <- w*gamfun(xgrid,0,0,"c") + (1-w)*gamfun(xgrid,0,0,"n")
-
-# Compute the true ITT_c for plotting and comparison
-itt_c_true <- gamfun(xgrid,1,1,"c") - gamfun(xgrid,0,0,"c")
-
-# Compute the true LATE for plotting and comparison
-LATE_true0 <- gamfun(xgrid,1,0,"c") - gamfun(xgrid,0,0,"c")
-LATE_true1 <- gamfun(xgrid,1,1,"c") - gamfun(xgrid,0,1,"c")
-

Next we populate the data structures for stochtree to operate on, -call the predict functions to extract the predictions, convert them to -probability scale using the built in pnorm function.

-
# Datasets for counterfactual predictions
-forest_dataset_grid <- createForestDataset(cbind(xgrid))
-forest_dataset_11 <- createForestDataset(X_11)
-forest_dataset_00 <- createForestDataset(X_00)
-forest_dataset_10 <- createForestDataset(X_10)
-forest_dataset_01 <- createForestDataset(X_01)
-
-# Forest predictions
-preds_00 <- forest_samples$predict(forest_dataset_00)
-preds_11 <- forest_samples$predict(forest_dataset_11)
-preds_01 <- forest_samples$predict(forest_dataset_01)
-preds_10 <- forest_samples$predict(forest_dataset_10)
-
-# Probability transformations
-phat_00 <- pnorm(preds_00)
-phat_11 <- pnorm(preds_11)
-phat_01 <- pnorm(preds_01)
-phat_10 <- pnorm(preds_10)
-
-# Cleanup
-rm(preds_00)
-rm(preds_11)
-rm(preds_01)
-rm(preds_10)
-
-
-preds_ac <- forest_samples_f$predict(forest_dataset_grid)
-phat_ac <- pnorm(preds_ac)
-
-preds_adj <- forest_samples_h$predict(forest_dataset_grid)
-phat_a <- pnorm(preds_ac)*pnorm(preds_adj)
-rm(preds_adj)
-rm(preds_ac)
-
-phat_c <- phat_ac - phat_a
-
-phat_n <- 1 - phat_ac
-

Now we may plot posterior means of various quantities (as a function -of \(X\)) to visualize how well the -models are fitting.

-
# Set up the plotting window
-par(mfrow=c(1,2))
-
-# Plot the fitted outcome probabilities against the truth
-plot(p11_true,rowMeans(phat_11),pch=20,cex=0.5,bty='n')
-abline(0,1,col='red')
-
-plot(p00_true,rowMeans(phat_00),pch=20,cex=0.5,bty='n')
-abline(0,1,col='red')
-

-
par(mfrow=c(1,3))
-plot(rowMeans(phat_ac),w_c +w_a,pch=20)
-abline(0,1,col='red')
-
-plot(rowMeans(phat_a),w_a,pch=20)
-abline(0,1,col='red')
-
-plot(rowMeans(phat_c),w_c,pch=20)
-abline(0,1,col='red')
-

-

These plots are not as pretty as we might hope, but mostly this is a -function of how difficult it is to learn conditional probabilities from -binary outcomes. That we capture the trend broadly turns out to be -adequate for estimating treatment effects. Fit does improve with simpler -DGPs and larger training sets, as can be confirmed by experimentation -with this script.

-

Lastly, we can construct the estimate of the \(ITT_c\) and compare it to the true value as -well as the \(Z=0\) and \(Z=1\) complier average treatment effects -(also called “local average treatment effects” or LATE). The key step in -this process is to center our posterior on the identified interval (at -each iteration of the sampler) at the value implied by a valid exclusion -restriction. For some draws this will not be possible, as that value -will be outside the identification region.

-
# Generate draws from the posterior of the treatment effect
-# centered at the point-identified value under the exclusion restriction
-itt_c <- late <- matrix(NA,ngrid, ncol(phat_c))
-ss <- 6
-for (j in 1:ncol(phat_c)){
-  
-  # Value of gamma11 implied by an exclusion restriction
-  gamest11 <- ((phat_a[,j] + phat_c[,j])/phat_c[,j])*phat_11[,j] - phat_10[,j]*phat_a[,j]/phat_c[,j]
-  
-  # Identified region for gamma11
-  lower11 <- pmax(rep(0,ngrid), ((phat_a[,j] + phat_c[,j])/phat_c[,j])*phat_11[,j] - phat_a[,j]/phat_c[,j])
-  upper11 <- pmin(rep(1,ngrid), ((phat_a[,j] + phat_c[,j])/phat_c[,j])*phat_11[,j])
-  
-  # Center a beta distribution at gamma11, but restricted to (lower11, upper11)
-  # do this by shifting and scaling the mean, drawing from a beta on (0,1), then shifting and scaling to the 
-  # correct restricted interval
-  m11 <- (gamest11 - lower11)/(upper11-lower11)
-
-  # Parameters to the beta
-  a1 <- ss*m11
-  b1 <- ss*(1-m11)
-  
-  # When the corresponding mean is out-of-range, sample from a beta with mass piled near the violated boundary
-  a1[m11<0] <- 1
-  b1[m11<0] <- 5
-  
-  a1[m11>1] <- 5
-  b1[m11>1] <- 1
-  
-  # Value of gamma00 implied by an exclusion restriction
-  gamest00 <- ((phat_n[,j] + phat_c[,j])/phat_c[,j])*phat_00[,j] - phat_01[,j]*phat_n[,j]/phat_c[,j]
-  
-  # Identified region for gamma00
-  lower00 <- pmax(rep(0,ngrid), ((phat_n[,j] + phat_c[,j])/phat_c[,j])*phat_00[,j] - phat_n[,j]/phat_c[,j])
-  upper00 <- pmin(rep(1,ngrid), ((phat_n[,j] + phat_c[,j])/phat_c[,j])*phat_00[,j])
-  
-  # Center a beta distribution at gamma00, but restricted to (lower00, upper00)
-  # do this by shifting and scaling the mean, drawing from a beta on (0,1), then shifting and scaling to the 
-  # correct restricted interval
-  m00 <- (gamest00 - lower00)/(upper00-lower00)
-  
-  a0 <- ss*m00
-  b0 <- ss*(1-m00)
-  
-  a0[m00<0] <- 1
-  b0[m00<0] <- 5
-  
-  a0[m00>1] <- 5
-  b0[m00>1] <- 1
- 
-  # ITT and LATE    
-  itt_c[,j] <- lower11 + (upper11 - lower11)*rbeta(ngrid,a1,b1) - (lower00 + (upper00 - lower00)*rbeta(ngrid,a0,b0))
-  late[,j] <- gamest11 - gamest00
-}
-
-upperq <- apply(itt_c,1,quantile,0.975)
-lowerq <- apply(itt_c,1,quantile,0.025)
-

And now we can plot all of this, using the “polygon” function to -shade posterior quantiles.

-
par(mfrow=c(1,1))
-plot(xgrid,itt_c_true,pch=20,cex=0.5,ylim=c(-0.75,0.05),bty='n',type='n',xlab='x',ylab='Treatment effect')
-
-upperq_er <- apply(late,1,quantile,0.975,na.rm=TRUE)
-
-lowerq_er <- apply(late,1,quantile,0.025,na.rm=TRUE)
-
-polygon(c(xgrid,rev(xgrid)),c(lowerq,rev(upperq)),col=rgb(0.5,0.25,0,0.25),pch=20,border=FALSE)
-polygon(c(xgrid,rev(xgrid)),c(lowerq_er,rev(upperq_er)),col=rgb(0,0,0.5,0.25),pch=20,border=FALSE)
-
-itt_c_est <- rowMeans(itt_c)
-late_est <- rowMeans(late)
-lines(xgrid,late_est,col="slategray",lwd=3)
-
-lines(xgrid,itt_c_est,col='goldenrod1',lwd=1)
-
-lines(xgrid,LATE_true0,col="black",lwd=2,lty=3)
-lines(xgrid,LATE_true1,col="black",lwd=2,lty=2)
-
-lines(xgrid,itt_c_true,col="black",lwd=1)
-

-

With a valid exclusion restriction the three black curves would all -be the same. With no exclusion restriction, as we have here, the direct -effect of \(Z\) on \(Y\) (the vaccine reminder on flu status) -makes it so these three treatment effects are different. Specifically, -the \(ITT_c\) compares getting the -vaccine and getting the reminder to not getting the vaccine -and not getting the reminder. When both things have risk -reducing impacts, we see a larger risk reduction over all values of -\(X\). Meanwhile, the two LATE effects -compare the isolated impact of the vaccine among people that got the -reminder and those that didn’t, respectively. Here, not getting the -reminder makes the vaccine more effective because the risk reduction is -as a fraction of baseline risk, and the reminder reduces baseline risk -in our DGP.

-

We see also that the posterior mean of the \(ITT_c\) estimate (gold) is very similar to -the posterior mean under the assumption of an exclusion restriction -(gray). This is by design…they will only deviate due to Monte Carlo -variation or due to the rare situations where the exclusion restriction -is incompatible with the identification interval.

-

By changing the sample size and various aspects of the DGP this -script allows us to build some intuition for how aspects of the DGP -affect posterior inferences, particularly how violates of assumptions -affect accuracy and posterior uncertainty.

-
-
-
-
-

References

-
-
-Albert, James H, and Siddhartha Chib. 1993. “Bayesian Analysis of -Binary and Polychotomous Response Data.” Journal of the -American Statistical Association 88 (422): 669–79. -
-
-Hahn, P Richard, Jared S Murray, and Ioanna Manolopoulou. 2016. “A -Bayesian Partial Identification Approach to Inferring the Prevalence of -Accounting Misconduct.” Journal of the American Statistical -Association 111 (513): 14–26. -
-
-Hirano, Keisuke, Guido W. Imbens, Donald B. Rubin, and Xiao-Hua Zhou. -2000. “Assessing the Effect of an Influenza Vaccine in an -Encouragement Design.” Biostatistics 1 (1): 69–88. https://doi.org/10.1093/biostatistics/1.1.69. -
-
-Imbens, Guido W., and Donald B. Rubin. 2015. Causal Inference for -Statistics, Social, and Biomedical Sciences: An Introduction. -Cambridge University Press. -
-
-McDonald, Clement J, Siu L Hui, and William M Tierney. 1992. -“Effects of Computer Reminders for Influenza Vaccination on -Morbidity During Influenza Epidemics.” MD Computing: -Computers in Medical Practice 9 (5): 304–12. -
-
-Papakostas, Demetrios, P Richard Hahn, Jared Murray, Frank Zhou, and -Joseph Gerakos. 2023. “Do Forecasts of Bankruptcy Cause -Bankruptcy? A Machine Learning Sensitivity Analysis.” The -Annals of Applied Statistics 17 (1): 711–39. -
-
-Richardson, Thomas S., Robin J. Evans, and James M. Robins. 2011. -“Transparent Parametrizations of Models for Potential -Outcomes.” In Bayesian Statistics 9. Oxford University -Press. https://doi.org/10.1093/acprof:oso/9780199694587.003.0019. -
-
-
- - - - -
- - - - - - - - - - - - - - - diff --git a/vignettes/R/RDD/RDD_DAG.png b/vignettes/R/RDD/RDD_DAG.png deleted file mode 100644 index a73abc16ecea1c7b31070fb5f0a5648246a23578..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38039 zcmeFZ^;?u(+crEi4md+IgCK~sbb}z>NT-x^gGe{h4I`8?Z54DnsF6vi^y2%;; z!1~`|c)85KXD4cvLwVct37rYZFCoDbUE>m$n0oNLZnq?gsqpTWzm1boY;Dl`QtC;5 z`gJAiR0MA?jOYW7SYWlSep6U&V|x`7CfOc!FqJGd zH%E_VwBC?uXQ}=)vf~K&_RK6tt*m%bte7rYkB_P+YTfXavnUmwel5{H(z<+@B=gX| z{poPo8h6iWk5p*|QRG(7xNmBOyYuXfdWXmImx$<%P3~i^Qs*HvJUs zos&isi;9X$*u}z1P(xbwf0~2eL}+Z>+?)hCI6OT)**$sK9bK$BxC8_QI5@dExVhQD z6>P5F4sNDiY!0rp|J>w%?;~yLYVKm|j(Yk-#`7d^s@c$ zlN?9M|7)aItf8{O8`Fsqpouf@-#2mQVGhZS8@1z%xYe^E?p#d;S0CoBuxX zUoCb2+meru^WQE1_09jgrIxFuiXS>hiv|NRu$ zSrki{<3DpIij`9lTn2%NL*%6;9(h5xUt`bT(rWrc!n_zLO@xDiktaz2RTnV8QI=>T zGLeN+48*CrpkIi_tIex)?h~O%dR^nB;!T`5a~k_(2r-01iBciTs9PLzvT0|hDQ$m# zy|6By%JMu}tohb#wb3A&efoN~#(HYKWh%>i!#nG^j7&=#r9>wV`|oQxv6D%(@~h>4 zKLOvGpmW6ggZ_OPK_moYK>W;$3mFIb&kI8#7K)}mJAz#=F!YHMER5k1;nXj#_XnktuhE$A-&M)pOIZPC8-V@L$0%flSmgx_%>VAHQBx+~~L0B)0)Ln>%$Wy1~j zG5smtmh9i{zx*7+8>?Ucl<=)~IqDUk{Xz1>+awZwts5D3(jER+Jq4*o3d&Zu)zTy4 zfoma~Wh*24>wLuT!A;s1tIQ*G>z}4yc^-WquTC}g|Fxc{^;Wx{uga*+C(mJ_xjwvV zKCH6sB*W|1dTQ_f*N?p?J74Ty6zSw1j|lxF;$QT7^L?)FZ82!nly@eA%nt`XZqn@* zi^4yhql0Lk4`$WHooOc!D$l6_CPYuZmLX=uA4k`F61Wu-0!r#uK8B)8peAw zc`CHBF$AxLJlTs`#Nx!sfHzVyudEum_A7DbFoJo$r!!Q$P2VW0JbxPLpg8X@d3>?Y z({FId^S?T;_}1f0@H<1ubC(ET?9xfSV^}rd{FB+`*}jKgNnOkv9bH)>U;(--LI|J4SYyDmczFF(-p{NtYq0wJO7O4UIh zMt(d;{E@hgE6qBN3%o*H@eVrR7@}fA1&(@SP2k-XtbJ^9()*m`OSfgguYF%s zWCCcXK4H1|kLS-q480c8F$sRL%OYSsLhiiQ7jJ--7>HT(`@@Sn7;rhn6gMszZ{O6a z9YP$Dni3&=M3r+XK|8JB}L`>4K8(e z?;=jo)F5IKZnGv`8|py0bJ*BrIOBe1|F)@aXG695$5@PAC=s(r;8&)2R1UsnzE`@y z-%S#j*jaD?1&cRv4`0}0`*xT-LorSEioW;vF>2^EB)!?~2Th?;I*pi%BCHC;dr~16 zV!3Mr{<;S-MRmVaB8a9UAIDci?bpjh=DKf9-9jXz8C3Gse&AA$MhAjbg~;Jn{AX2d z!skQf5i$g^Y0L}|G0h>^w7IrMaaks9B_aai$M}Aw&)}app+FG0L>Zhs2p^^kl~-wx 
zmkzc!-3{ktLi;h_Lppq=5y!PO$k)TW?*Cd6#LCqi6{SQ(e=lmUUf1;D^rP-_2$?B8 zD)Kp)IOljPfxlZz3WjK3XmmEePfKbV?6ET^Gi)Jko(uLB2A2reY}D@(!Y-?N40v%a zYiAe!KT~arx-IPW4n@!P>P}Fj?FL?=lc~{2l;9YqtITUj*-Ii^;HAc$EGz!r{}~L? zU0%6;FvK&ercb4O&-s@dx4$%P8@MjiEeaAhd0;5d}NW%@pqVM5T$Z%+}oUi-T9$}pX!03%| zN^A8Unq*8woWN+Kfvt=CxQ(xE-Nyt?Yc_o%6a2&d_vJ5vd+|?e9>$WFz_KYtd>+Nz z;b))4syb3a=kyDGS{VEg4_t+KD?GEN+4jkc)Q%cn>&wyi5%}1w3w_csQ*=%^_tU(g zm)GkGhX&FiV}tWm;ZvcqW=*r{$?klUtAR`nISGt_;`l5{Dgw`4;CX}`)dT1g#tN$h2v9JoHR+nObNkASrdjL{*IJR4Memjb&?O#f#VNj-ZRB?-kGb@(qmA~BC31x zStI-FOi%~CFXoc}h5bHwQdE=cH)5h^pS5(NUOBeU60@p`UAYOB;aN$w6igdEQx-m% zF<U zzGah)8EI!MdVjmC@8x?q%-`$Tabd{izWE3m#A`4AllJJUOs{5Q3O+S$0vrwQF1hxL z>p=$-FfynOv=UbC#R}~rYU#}0hvR;>AV=34kEwwGUQiun))PtinKxxq5nl*I>*`d_ zc@SQmKArpY(d=s~jnOdR1>K5_>9mbsABy?w=!@V~eLs+7>}3Y43|(~kMJlg@6<6MW zlX=_dcX1ND743KX{*$C~_mFJEKfkY7h*`(^dIY3{1=^yZrnY>Jiw0u`X@ysXFU5ZA zl0R?~S4!u;SJzE97t3{HX(|fMFvqxj()s!MWh|D_H#+g43Hi|M>!)^x1rnhO;yi#4 z`=W|pw7b+{jB()6#Cssc$wezMwmp^`(uOZVR8!GLj|k<|Ghg_ofH8QBMZ4InP&K>a zByrw*HHzWU{urbG3bwIYHp{|n+p~t9)F{$JTh*)cV@=q4cNjL4T49?MZmF2^BBnd$ zTM$&@$e~J)y^cTYV9=YlULFjZ9dCa2u%z%_i(g|-HtE-0!3?&{7WEr=BIWMtiAr?N zOe-xd-Hzhd$Z6lM9`v<~j6YTNKS1w)rTnAY8NrR+U;BFirdcn6c)}=v1!l+PA!JTA zjgu>cgzRD3!-9T31WwvB{kkUw)noU=H*-c`fBpFKi+ggalx_lb&{`E|gNAvOuqpjr zzNcw+IEs@})lwI2X{+zciu8@X>oX~>Uf5N(P3`cGe$4Vc={-o%t`B?&Q8#8 zea_~Jap;^I>P_J7SHxu*xx;M{>GkL8^ts{cuLq&{mtD27{cPu({CD^r9{ozI#=?e+ zi(n8ku82iBAO8IF`=?Jz$zsRv^Q~m@CXgj!C6(+dyE3aLHVF82WdK(Xr-RfScZJ-< zsAV4$);6n1ku?9g*p}RRyXf8RcRwmpAKt%1TJ|CKuAS;KDe&8?PrJ7r0?#&eI+#;9 zj2(EOH_}SSQ~f7KUJLbp)-krf=pRT$i2X`)!}VuC%*Dc|!$GR|K7X@{o#`I%yb40G zpFtf*;BlLTN!;v1sScWx*B)`>Q-z_B^SBHNDHn^t_U}pMcGGp-l^Zn$U3e@bzI$Ca z>stbcDBt#_#pS3cA=wfBVe)HGPlQ4CEmM?z_ke&G(@cwJgK=1?7Z zu>t?P4L6zV4JECF<+BU>@49AV(;4(E&Ok6|Hca9|>pCi%HpraUlXd30i9%(H#-I|? 
zr`c})l=;3L(==X9qTcl1CnEI@vy%*{ZKQIfyDe(zF++V4+`sa@2KiuN&62Jist*^6 zU_;QiJ)PygP{rbOfaFL>k;{aR1QD~1;>#;3?PpxNgiz5WijvAxD4N|V{6qJFBE@wm z;ZOcHy3s=}m7p=z3ziJ9~ ze6oTM%kd6^R3M3!GUJQBW7~ZKe$qrVhJ-j5wdwqKCb`Rg!xj%Dya1oO79RyoB$58Q)wSL) z3$iR*x6^knba8iKX|LOC#%XS;&CYUT#jDVCiGHMx+!9UddU9vvb92)j0nGhu=}UQ= zu|Zn_>PLU31FllrxbY=*MUE#m>tzq$r31%+a@HIliVI9*3m&x zanz>qAT98j~GAo(siG^}VIIV*;T8%dSLo8qZNV7QUWB_VzZ~Us0FuX!R4;gbXqQ{XD8dUpWELDHC~<@ zV1MgULUlH8``muxlF{ozuq+lOJn&1^$ZI8RbtQt5J@xhY60NSt|Km2D^pGXFvrKvW ze9g3;0dm)kAwm#+YwJ>U@gMWI5SpJO(ooEZHIuw4ba3^BGNK9o$^CS9;mXSN()}F; zXS{eCNat`8=?`gw$WfgkwkzFXW}A~GPd*dfyPmHEi$XWQwcaqHLaDUE9hij&_IJUHVq=iGY)MSiv( zGB+iCOAxOBOMS@wv}T`9O#piKF!m1r$o(~ZdRnCnUMBUfyaVEWtPZ$at};IL8h;1X z^p9@J;Pli(KUVFji2}tPd9&vv9PAzn^s!QJSQF(du&Y((3txgn`k!vnNkW3xI)iTP z%+9&sn&=Ux)!|c$vhq31%b99O_II7+J!jV_R}u^*$V2oUP3b%S!kP_nU;Wv<#m?n( z^u4;K#Zu39;rH*;4jrv&W1LBe-8k_u;7ha-F-3{ui-_-N`^;cE$=@ak!yuPH9cK^f zXQxdV`TbD*HWS$rI~F@AZ?v}+hHOtb>bgleGk&w8aTV)2wlyXwYI;}vhUIkJ&adVw zt0lay*~f9q;K;Ox;#diW&2Cl7fz{!AFGB?gg6URer%JUH{d?{4kl(pT5;Fx18XOGw zk1GQVk2}$-!nr4dJ5(taOYMHW#=j3ppoym?MOlw`6D9*HJ0RaVFiRY7--){owtrNv zbr+opJ=AuNonDSYz@>-JPcx3U=Fe({)?f;oR(y-+Pwfl&ZKYRy%Hr>&&%v-Au_MrfeBhwT^HgeZ^@vlPfzSyF*7{~`zkgM)!n-7^Fhx5@>lfOUkx0~J+s?@6~ z{61S#bob_QFgE+Uty7RyM;2xKKQ>9ea|Xqy5Z29fQSdrX21r9xGK)_1bz$gtpg3V* z@oS;k9SsnHYB96fPYlkF*d_awNUOcp=+*7YzU*y28` zuv%zf32Y!7_fF57m!eN9mqSVC5%)rojAIIt-&=kZzUui=lx>u+^&9^ctJC~l5$4A~)g7`adGYa??S~P=XZYOX}F@-f( zTSIcC-VAoUHBI7D&uR3?(%KJ_AL$89+C1**2#;e%E#8Q|6Nx zc}WOg?!8|f%auB@$l+h|{WOgwYTA`4;95BF1OPsnPm64v7;s>jNoY)&4G9<~Dv&Qv zHCq%zqF;Sjz;z>1Xtyy0AARG3bw?{$C~Pv&aiRG>|DaP=!0%tMu5qren*nJ@!OP#r z)clXeAFhH>QTyi|*(gY=QB|1VYxcQD-f=?B`wOo~thKKMm_C;MHnEqZ;)I@6nwq`f z!65)Pm8Ka=RH7rm2)agyt_u=9;9=&c0w1c!+IpMqIDJA(hV+N_dxFv&*WvwjIs1=g z#UX?D7F`C}FwhRmEOoxW4(4s1<9rXF*M@TNAlTU%e7nkFTvX`^_aJr{#H?{hdY^V( z1+w{YnLyDb(|8L}x;QGRzz@v9djk;yr8f@wv>ShbGBmi(eB7HlP%DX;RW~Q&`Pie( z{c0T6O7cUCA5d<}8NBj&3amWe;78)$ z2u$de(&Aic`tMVo-V8-b1=qni(hNlJH~sE&=?Ck5zU8TYAyS+LIUFMT><;b+W_|== 
zk2Fl4&?!T=snwWUB zieH{=UREC1m+=R;BMKo0!M~Z-xPIQ!7t7OlLvnq+3VmC|$4w%~Ph}#i5IZx`$)roS z!Qrm8`@@h|O@e@?af7TpL~(`%QtV1iA*prWtzx|Qm!p|441&~di})O=amXT!Z{#La zkgXh3+}@H{@49Ul=_-@CcblhZ0&a7_J{+->0Mh6^lR(*rA7Z62)^6P1zhqSUuy~J^ zlL{K6XETt)B^6A`$-6UF%!R=Ki6xjR2&ii{R!hSfNfGd35XUX>)D}*J(=Y_yyg|`C zw%8Xs%A!+|Unl`v;j4oa2;Ew#$X^Z#f4b0|%p!pCUA-v%j-In^FGSqC#1S^Bb}su7 zTb{}QmR1%>TGh-MRe&bMi_8dj(&K5cK$wu5`$~n-qpfIN^J4SWXQ!Byg11)QNBmGw zBbCj%ivl5vI>>^LZaEdAG_P5x#DqAcR@SC{7<=@Jt&$c>i|lkn4gQfoYdRxdgu5DmVr4^PN-(g((g zgPUt5qH|0;*Wg2|Ry0Aa1c7P-c2lfD$p#kPp?5F&Okfdwu|Lo-T|#VnhxR}h=`@8v zdPmJ?;l~54&@;tNGR$9bIpNebh9lu1XU{YVR@}?0GkuRa-t2CNFJN&VQc2e5171oqp?%U{KAdsQ%@y zX9v0&Y4G7~&lB}M{t?aYE7;@5Hg@jibi1T!OiKXB5neIT*Axe#0821ROGylBV?5X3 zXuM|<_d5aSQP%I@me`l*z#K%m$YoS+K?ITWjkAK zd5z4UIGXRT_MS8!bKms+vmAm?jLWGX6?0hKZvvMC4Q_wv^}En?D)i4><-AG ztlm)0Vo4#Xw8_o3`{7GP)>+=NoZ6Xm-S!kn6OB0#L?;!AYvY}+oIXuSwY)5l%PnxD zM?7v=@_$z9$2#5)GTPaIb1X?4mTpFgxV5Y9*~81HG|-GzW>bft*u)5xZwOtQ1Kb8%Me`=}8(&=NZHL)vnKI*! zc8i6dEkD~I*%>UHhkiVe0JA81o?adk1qtm3nA@XQzVFWxxZEMnxxxdFkgcbS&kQl- z_0(-vFKt$+a>^gqDI^V0b~8)vUT0*r_Fwq4DWz-w04EC*ox~<#XKiHYt2f8yPcwZV zJ{yqv^iAT5{~YjMO8PIW+CQM2(DdJi(rFth8mbIGM9YXnf+^s|C7MOVY0eAL!L}q; z3pq&1FH-m-j~*Q|97gO-bp!;A=Stnw$JQ-UxjSC0{-!hKMpN}D;GlYA?+W!Ks*37q zM9TV&oe&bT`t-c5GjNP$mCf?+-Oi9Y7d2-H`z8d>9O9@K4$LweoXJTkj_!f-5{ zc~Sf^`WA};L(_xN&tmT%RoEef+&AeqKff)?rqQl6GTfWB&r|C{&fR1hQV2?s%*Tvq zNv8s64|<_bmQKDC07P9~TC54%geiAPpfOhkim5&mvVvPgDIOE)mA^@kzcjfPd7aE! 
z_k!f^v9?{Y@f|l~thljo$p^>gQ}~Z>8mYpZ0sBqLV4spK0ONFb-=0xVKiMOYP_r`p z6yGFUBhf@p71sCb@Y_w)Sl*aQ%GT|L%6(8y!RVjVJ~WOhG@uyyDR!7X8u_-e{c)|$ zcsJfitR>B>`*y@{I|^%5E|p;N*X20eJtZ9pbKs(9QnJdkn~kJ_o2X3bykl~CJp10+ zhG9^<+Zh%*tYhr=*}3-YA!`()rj}On{CW!9MG@H0p(P6!UlNSA`wvzkc5yn6Wb$-j zP@wDfMTsl%<@2ic#-m(e?*qE~3I3R{%6=j!Cg12I&uT7b0w+;5%NTjAMCUPUQhD_E z`KFpSx8fxq@%K@})k!9N04-<*m3;asVdsgCbWR#)Kk2JmrfdC73-uH|JzoA5hzDWvp+b6~$g-+sZQ4}g z<8y<7a6|}zoTH>l*L!5DB&5Cx6ve&(uEm^A(xMIW0*gN;f=5xF8d}6D;C)~r#H*&Q zGu34AG^i z!mmfbliqhtGpUM0292l7QH=rzx6yPu>!EXf$HHbnD>h{fhS0_F!+JyI`@58jdmM|k z%O4*kDLJs{Su!L-W{T&p&3{2H4iSX%nA5*6&zG)` z>JtHGZ=!_3sgP=ENpy|(f;Qo;ScwM;hQ&j9O?P(60Ji?LVy4}%W$qguaxET!U&1GX zUkqM!0ybPamECaCxha!|VVRL`zMm^dAxu6BOL85Ab{@OksgPHLe6ETx?|WclRf>#y zu)rdziz*i-Of_;>@Q{d(?tJN@i0Sx`)zzOvUQr6VF|rJs3CVSmfF8BK+s`W_2;}qz zwDj{g4`0qH=W(VdXITn$QxQB$!+C>e+y51m4vLCeda9_%P|z#6cif+N3A|1hJh4ON z%XnHX`car{s#4#oV&lMut?Hr5ZVgTLph_Cdwj)- zTvm&G!Akj2zI3c6_ln=~#Dj2@CMX6~tZrElQ~zR^2C4%ipnrDsotn>n<k7bmA5o zeN+M)!@ zp2N~Gz=;8UP_O}E=M7f?7nKg{K5GQz7GT5Vs4BsA=Ud(j%9x3lpr`}Cs%_MLbDi0( zvzv|vg-U4(hf(W{(KFq<%V1uvyWk{9fxi0JrBe=N!JpphaD3kEPh@x=bmL~}=sa~$ zD;OVX**ATV3bEcWQihRUQ>V0d`>lu=AUsL!SR;YZ8$F>&bhL4&<6m1TVgCHSG5qSS z(lw*(^zRB3hV z%nuOwwGIn{nCznxOEpmiV7akgC#?|VBNed!)`z@g6c9L|3J<%PP(`mQv{Rcjne~|t zu<;;8O=Wo_9XgoURFDd0`sn1Yol!7^BIBj-L8M?v75%lv(|`f?ggO9P!TbqO0dM^P zVzUS~=aBx(Z#ivgzfR3hYpg%NxX>i1+y#p%95<2k-KC(*3fx>U?&Q!f&Cg*7;rI1@$j+KQ3e+6^V1ClX2 z?C9oap&pNO3}ZS{*!1UutpP(M4fFu8^>3$USc(4q`+_K{AO$dtvQXmD`za7Bx6_?3 z4RbLL<}5?ey$X;O13*5Q*-^Y9pjaTQvHe7UpGnCdpZYMvu7VJ(_vgL1A>H|OlnZ<% zBw^nl_hiajmif|zXi3%RvdbSnptmrgtWu>`@{#vk^e+0o)X`z{rM?_!L{d@}g|pxM zp?4yj7b~thjj7B-+f5dmX1|m{r?Bd2jMoXL!>g}@x3&-W$DK~Hh4boYKtivREePDj z=d`4CY7=D|#KoYvBq07$XGapn8Nt00yubRK1Iy+i&@&hkxaa7;zJ?6ccvFe!7Z49U zm9Ub$m_WXz>W0B(jp9s$U?_wLG4z+Nm?D(1FNahzADG%b+aoQ>>?g1ivAMb469(eQ zT)V{?c|9>EOd2q`w@R<^f_U18V0ZM30_c{I7^{0`vF(}Rofxj!B2~(9fR6R}lB#oE zhI{PJGg*yf>OFdn%(d8_sq*lvtIuwU1s16O5{`%oXvOwZSVNP3O#n%1=kNJm~yg9j;2*4I- 
zxq$$1M3T-aCtq_}Z8_MpJzHZ~6PoS=yh+y9wbwCUyUC;`8=Hoa9|ZAbOh1tCoY;7z zhhN65*cU=sUMZ${s2L{KG5zN0;LEZBZcQCO0CU`O4>e*xC^$5QcT+>z7_H-&&W}weS)#3kD4@!)X+nOnw0GV z@*rt>^mt4P|JLV7IpV1hJp<1L_xdQcj51UIL&5RhVT)avLna0$x7~R??Ou1F*)ag{ z7~}B*Kud(Dl?{7dKOzj2#h(+l*Ki(>`Lpv1Db3P5o$ZdP%z@wO5B( zkp^0W=nP6@dIP7Dla!kbY8s6A{V4I}^d2gr^-MO`-t$ys zDHM{&m^M_!TS`pGlbKWOd=5a*eB>I8s9k;#fUn}S?>lwDkcHC20^n{Stian3)hO$Q zAUWh>DfoqL;M9pTV5_nJu6Af|6N(jddz#ZS5yCq@_Tfh`k?2=13QaDy9kA5xctGTv zF9y+^{8`FVJm|UFW7W5;_j)YIxh>>2{f3se1pLmAo=``V@!C+02p=l#14dzvXT0s) z&8p<(;TUnzOV+uVI5p3C=ZE+IbP4Z`eTnRx1=@=V{8^q^LZz$PPmO1K(6lsB9P$Th z3vbIlK2s{FzK!C1-pOR86;ZdZ2-)9p(v%8x^y>~j-e6@N4sY|3YKmg_AB z;x4-|j+xH+Ya?05sLC8^wu-M=kH}GNao^JBTp5^<{$hT6UxtYjdEx2M6o_z&M-D2~ zJ1#B8x-z|VR=ifz{F5o~L=FUhfMp1YEI$aQRg~Z!wj0jdwaM0ece(WE%2+5zfLAZ= zYB0$k=Ec;V5OH1|mUi3$%3SG?_*Zhogd_mlx3ROgykgaXT!_8{m5XgjHU{dP=r`BCvpW*f&1OA@BqF{5xmsq0E`s`bti!CPWQ?Y%c48w1h3MZ7#F&|EmZQ{fRo zr*8NjEjpA2@i6o0y(q6Q07agL7#4@F!=R(-fHIe(!n=kga>&g{3CNCba@Gp$aSXlT zyO|hcvipfLk2C0ynrt=jIVTIhDN0hE!YDegGn#~LucrHE$u@&ZW?w3+Oo$Me9`)-& z=H5cwVR%mdg`#9!@GpMF_Jm}b5PEEU^#yor1Y_8Mg8YYPq6U0NB(yP>N3_L3{s0qWkvk_tEVQo2mGS?1TL2&LXIHGBA7TBe_7Q=4&A89BbBQWOI%Z*HVpj3 zLoFt(L@XH_A(dQIw@}!?*d8}6m`D;Z7Q>u04}_IN5ogp*%sPQEk#*ll4NbkikD;oO zN;#^XU82S@c|X!YdVH%fWFBCwG2sNbPd0#5(S&eWK_DFeMo~6ttrJ0JS~S|JqB@vU z0dYQ(Bnp)qyq%O+9s;Db~X>p}ki< zPgJA$w}0VGOv7B{}V z^V0mb&PtdZJ$%{PSdMJ}DdD9-b}eRrLTfGeU#*ia$RxvMxO;r3Dh&1CvGZSMKY$jw zbehz=tSj8QBqt|5Yjug0s0=K*)1Dx=_sXEwMtzLcyA2<9PIou0W52r>;;P5)??(6sC69JKg(G`QiIi^KWSSTT{m_NP~n8Q_pn zIpGuUj3Sn@l{nE8yo|PIOJ>}$6lbjfy#LMfHTf1{AhD7PW|!ASPjU2Km_IkY#E(Kp z8@7MeGY-H^!X>!!g)`?uK)`Y#ZH}9v@m08vq)t-GdU7U;7GEdpHScJ{v}26b?a0}m z?v)v~)W2rBpR`hK>F@)f@C6cA1$HAFL+Mhc+&%3S zl`)NoUOaUM>B(Gs4vDdEmAHQ-`Cf^tpF*bd2hu#d=?a-c&UNgvpGFl$MU+J%{#RR{HORuZ}+L-16nTGso6;GH171^^pP%q+fX=rg(GO=~TKj zUT4-~7kbVM?$c2$6^)YqmfKS$RYfN=0XjcY?mc0=a$QR%r82=Vwd4*ATsPLX#AUc} z1Y`~7B;zZsccD1FvsfRKhGogdm=$$(GRq^%p!Z8lt9hT7s-zI;9xp!o`Kxq#>Gw$k zvtod5@otrlAPKOwxL!`r%3K*bZF&j&u30_t}v0RC_+`uI`=pqgni);P! 
z`=Z0+ zUas5~@<0f@pMGyC``Rp+c$Se$l;zD*hwbqmGCtKZxYIV>MrPnn?#J~zV*b9Xg}a(6 z0y3-yjcTpun{QXk8x~P4+GPY@PJ*9EI?iOAKPmIiZis&KJUF~oE>-#da@ij={#{KK z$L%|ALLcgM`fD`6$l%#d-9bWM!>1xuo$jrx)1}e$kJM-}uf3d%%g~%l=L?nQ`__jW z!x$0!s5O4S%l(h5fVPGWJ}cBplm4=z81?<8%l#4{5|7Lhd3N5L@MR0c&a5x>H7j&# z;#9@aiPJY}_r@PmGPy>5=F&>3)O&|R+eqeKd-)z~(|g*`Q|pn&b7HpbEjBjm&!-)K zuB5+2o##OT~x!A%uFHBj#juM^X+M!ZP6Ys&bF@79cXZuZ!45FA7L*%*| zv^%|fF&y}nlF?%eLhnYUK|$ZCw}`Roeo5&zOj(ODMwXUhvN`qjL7uoaV|UEM%+O^a zl>JcUi*|NsXmY*@ruBiU__rToBu4VEA3)v4z2Xl<@ad&pKx0#UqWt9-Aa$vMp5aaD zaSvK`#!AdRrw`AU?&9<(N5*|S`YfMKrg6aF(pZ0Z)@a<}U)q_@V_oXE3#WwyaFh0t zZ|u?;lUpd7WC1i7?q2D2(9dA*J|4Fd`Q&W}>+WKkP^lUBtDm`25s$tcJ4SoegvQO|I(-OEbPw2Qfg3d;apmH=R-wvh5{~;fz$l5KS8nh2j zwCyyYf^0HHibrvQU8-uWkkf;mt#p1RNAb0u7pin-MgxRa!7$6&Bs4LqjDWc!wQ~p&1f#!tafg2{D92D$X)mWDecTW~uNb8ir7O#`wRKrx)b@B~ zYoVoKPSxch2_u(($6gr6S;fdt|5Kr7L$59mbZoy0x_!@KAJ;1hncr9H%ulr%P2Py2 zBlw4VIZDJ;a_0W=cCF96(0=6xvP*{^{`NOmzwBbfPs`7I%|4fE)pOc*vn~Y>TrJ&P z=!J-8sna16(r{x6X0JtljM1FTs(yTROjmd>@{~-^R3&X}W~&Q^xR3SEh-eK+QNC|V z$}7thPk$~|vU3_TlFZ5tf_Uc z*ow>UcOoi=LKeBw4fvvBi{QLd18;g{{!5S!!Y`Cg+kVY|6HPTyZY9CxDXj252mZ(A z;^6@KHIuI&cmBE3|DKy>#^qu@+mvaF{B&8rxqrQ`B)ZTZ?l$_Iib)_$U|(i~)NPA* z1;~&>H}Hb?Le>atMbr%|TNu9kzf(_6?e%rvQYQZLkuP3c03kE57Nla-zO&`8IT$G>@+N85sZI-9!3x~yNoEONqzggebOx@u&yFe znWt{GbqCNd=Iyhd}70R9xe(DKDvocvR->60gAdbNcgo<^uc?Bjlf%uf}Xo&b9h}W zFC^mk$2{qW#==!CKs0~q>H%~wlAq-8t`A5!Nem{}iTNd@C)%R2-Cg+y@K?p6)&;me z3?h?0zNBpUi2H$nzKlBI2JG8c3=}&GG3vVHcRHJ)keG65(Re3-mgtHNeWeBt1RMhN zx&SJYExQBJ?&!+S;(RYOjC7Wumh;%28P784aiX%_RaUpZ+NJuPIOcIJfUtaT;&=FJ zsg@z1`um?S5+hv*!JL!QcGu6liON#Ct`5h8ToROH_i?5~cDF|hv#Qz1(TSNg+g$VAVYS*$_ z+OVW|pg#E%$iIos>E2!o;M@b_!h{umsY>dGK7(5|nn%etz+W`tFkt){p}bta=}>gf zN9mP)!Fv$4_&bPy1zr z$*By?X64Edp=TlI8ZZ36RC(>22@wbfcUkXnM9R-{jG9dg4EIkPxGGm^4pN9YuZDVU zmJ|(ycL&#F1+M{?s~1G_#Jpc8-^Zo=8UR>Xm%XMkXY?e|q9CeK$Y+d@9Suk zYcw4a!tf|8`ney_`1YoDTv}e_{sJ2GxvX-*r!$;EGfCVlm~Ss`^?xmNXcLy(&k5}K z4y0mmdh5`81{niAj%{uMkJ8){l~e*osGPZU%5+y{*W_UpX3b2hyCeQoh)4})a_8Yxv 
z+UbTOUy7WhJpe}rwo8p%`tPkO8lNYW@>w+wI7x1rgnJO+7?QA0m+48>G%Eht{n9YH zZsQxliDxOHJp0ECAkCqo3(W+m8zEr#%1S5Ul0GMux8yD%KVA8Gzt3 zJX*)-*VLf|^Wug} zjxe-r-_G~>V$ts3;PhI&8#{_A(SiopEV0P%g>3A+8m>_78-u3Ljdf*fv7nAXo1M-G+F%Pv&I?{UhNuY=U@sqvsN zGNn13|Hl}QmxQ=6)k<&f1Z0xZcQ44kxqUU7r3Z*MzmA3PIQk#np}tY^XD>pG*NXJv z6~;2qI%1W#>5a(Yuc_i%raa5A|LZe!Gzc?`>|iD;d~98wIQW`nJI{c`I;D)ls(#aX z#)RyliFIP)Z-N)yLpO~FTahUmf#Dvi(~yrMcqaaT&Zj#nqW!=@fl2?SyGo)`F?}7E z>l`q8M}eTidH419h+AfTanvWaUeFNPFG)l_$M2K)dJOUXR!OYAJ}PIglgf70~^OsN!N#n00R z<}X^!rndK(FD(W1a+A$0A4jjVRJLkZB2zs4SW`op2agOl-7bU_<%*w7R~RN26uhmj z&9EFj^$EV0sjb)Tk-R>rcl?I(s&C7(Dzj)QV(*p7fvosqOO+)^#EMJv!Y7*V&?PT> z7TLMEp`U8FOEw|CUzI!h4f44!0neEAmK&S{33jat=Q4Yck(ML!KEX>ajr`WfYTGg0 zRgOV#wy*qC&_^)hz>T^Ts_u27`>L?&u2^M6i(a9t`}76phR?L{2Wnu zc)nFhvZra#?(5x8x|~a-Yf67kmrJ!T$im9^xo9hqaa~0CGp^aXJQW;d5mJG->99k| z;*lG5`Dl71RUp683-N$8=@(+&2|x00j(aw6FKNF7wmHiGW_AcQvn|!a(-j^CaXRI`?&& zpnIv|&5jkNO#bPM-4<1o;1FAR(jA9m$&FO#b;#F7>4>HI1bhASIDB@UK)VX_ zLFn8ddrKLWP|tvyiWSQP>Tvw#LW)2MdhsN=s(F(PI zE+c?tDe_bc{{W{>69ZEaZ)2!bD0#(vsNB4bGWCU}kL3~SN_|l^B8ACeRLd^pCXzZ6 zl8I}d+MbAn6hB%m-MWo*`$L@Zc>0I&c`O=HEe9D)u#?zA#Rkb#m?yA+S;d%-Q zqJw-Xkh}7U3?}72%jmgHG^EJ;AFDQ&S`BAtO$}``E9u%y7RQSaLNBt1I+xA+ZobyP&uRq1U1(c4gudI_8bTgQG z?_4OYpvD-yO{&xD3@jTlZfCtZrqQ;vrG3k4AJCtnk}cXcz|EB^%&^J9rQYZBU^YN6 zo0FJc(e6H7?{IX>9jmFK=Wq?h9Ly&UWwoyoqb1C>TRiMPKUT6Zy^R${J9+oPC-Z7d z-fp?Hn4o@j@T~7s<*z2%O?M8kfeFR>4!WNwt{EscCK z`-Ni{i37oBK?WbR*lL)UC=FnZ>lm%l@>;(BxG5_eNdL2M%xCm=H|=E;y-KEyF|YXt zoHu5Z-i}eftl9PJm5_Fc z-pr~5r`6Ey&qUT~w8t2pFm#|S>M;l18RpjDB*w`67>@t>r)*}^3Af0#h2i+9d8Sq7OvuP#e?yk~?$#tw3H=@Lhy| zFXe~O(y!S0;q9q|sM=|q2;}6J@t1mg^a_{5`~xlaT_LwB^#=pH-#H#;j_DASsP++F z!-A8uD#i*n9=AWxX5vj;^~Swfm!lkfdhYZjd&D5~cPV206F?f>_~@nTJbn{@e`uih zjpiUM2Gw;TXSwa=Q4i#gwrYPQ6-|cEwC_u%q5Whq!62DwAmPpO_++Fuq3L<|Ala=L z*u=$>?_%$Ntm%%QiEU4V`C}i^R+O=Tb8BVn@AAxx|1z+k0^xlMPQle13}A2QEi_EL zD=jP(1hiYt)tQ><+3^qmq!L?c1s1Fq`J!1Mm~|!9)7|-O)Z$T|!5X#{>~KCJi~YtP zqfBnX9`kDkIz4ZFc>WWB$qP)c!tM}FQKldZZwbTGE(rrW5;H|!k^`(rJb){J(a*Tx z7D 
zW*&;IK(QL2=_ZSBR7>*-mx#aW8HW{soR-(`V}x-sd`YatP^$6Hc?~mfmgfWNk37s- zX;Q!k;_kT3UmoAK_CoL^#hs1>S5hcx96+P}GEQ3$u`iT{&Zd-?+}07VoPJzxjJ(!2 z*)#%2w|`)tm9s!u?=gcy_w^ck13*@Zp(6);Ubn>=p8)A<&>u>j8pb8lWh*u|y*)^8 zjJJv?<0$(8PiD2tLCN{;hwAnRY;Mqp7tcAi8W-YFdZex0~T)uiY z7l}iFYkvoRZzPFr(H3Y(tTobDM_5K2=M4m?wmD@ULbP98pQXJ8nZEJCSnC1v+&G(b z^Ko&Wk{379;+j-_FGTYAK0=yXqVxZ>_m*v0Hc|UGARs9v-Q5k+4bn&|AxL*fcXxwy zcS<*ibT^0~-Hp;6K675zb>IJQ@VvTTvTY~B$sEVbtXXT{f2&V>_I_N^C*`IxgALIJ zg*#z43Z=z?th8NWD%1W;Od-Y}oEwCd-!H`DNUVjIXX8yY6_7B&l6r7p3QnRjEy{;2+g2c!z~L7(qY zZV=4_48}!!yaTKQ>urA3gk=%iqr}rsoiPwL3mjZ z0Vj_~O&*)Yv-#%>w2-%oXrqQcjALK(ybcn3s4Qg+INU`1zj_(;^X@F}b*FKJ9s?vm zTAgr`c+sENG5XU}GWyt|?NsJ1Z-HGOy+DnoEIbd0ZP+~-2kV{#fXDR}t9{7fF78}F8XCigfjR}H8lV54VWT|)Aef*^@OH<=^HH}W4 zKfLgPbD->EXJ|&|gO-o~fvslOVqoOO4r1ml3^uxT_^AD4{LPQ{*iOvUTWmyimE5s? zvaUesPK*&kiY7O+Hca5fA{bn40Z2^}X>IkrZ>w1uZiYYAU! z)%dw6JdSuqz%4u^P9#cUuaDzJHkpji&!onbVga*1L zMe{i4OA498EU7%zlK5zE^We-yxv1ENHV5??M!ZQ|Thev^wO48lux!_to+=GR?5>zH zJRqHU@JD2_G4vE3HFPOZYn3gpHRv?_r3Rl(?p{D6Ry^%_2$^4tzW0gnKo5ROacs60!nVjNtMz-|6;MCTrV zRdkj6`sv{Y7r;>vh!_T#QBWn2l8&G+ItQapASJ%B>y*YOl4rxy_3pBm?_v1;V1?BG z&FoZ^*RcylFtnCk*qXLmYOpByqZ?M-y>KzjX@+e2F=A@DkVVLklh;XWT@%I94w8+k{M7ACL^f#l|E7wu(@0STS-T9bvD%;-t< zMf6nd^!^<_K+C{!KAgpfCg7G+SChnu188ixl z&3*=bLm>(^qrh-ay1P~i)WukVT ziRJ1c6|26Nlq17Oi^(+#ip__o7|xpf+XWihOsD72->w*A=%ef}(Z} z5&6z#Nm^1Ce1587?DfUQ=t4$%zbhUP_jiEMPTfi99Sx*xD4I5?hgLgofk8KIjFd#~ z;)PWAMO*9^!6;vzeY{9VO`l<24LG8&Vy}gRq;406)x#qZf?g|#B3ogmSG;K6K+kOz zAc`5I{sflM?$=I$FH47&w;HKnM39UeZ(jU<^33^jpN-i+G3=`$4{B5M4r!gn?E=pXvmyP~Q> z7Xs*oY<7jGL4*?(W5eQ*jJD7qtWU4LYnU1` zBf~crucbZcNk_(!i8#K3|L5ZyW(SxONj;coOuV)Sd$|_@00`?AR3l|jLmUZ!Py0XiDrde!DmJQX5*V6WwuIy zAJ+@hZdjQM*YKF#58^SPT+`!bmV$#hHUGzy?Lz#`qa=(TkQN;TV@?jkL0d97PLEqt zA+}&@wmFsY#}u(bD!>eo)7qrnttrrNcF|bYNJ6owAmMJBhHQ{u*w~`I-cy8p3RLSG z(`SIps3n>IB|M@!#Po#Ze|C9{m0Qg5;5~-(#ZL*U`}fXFCW2CIOFrcPN&^3g7Cvis z78cBF2FICcYZ^pNSQ3wY@_qxb@-1^9B8XMQsqNoc*pp7=WaEBX+|VPhY~rSICNQ(=rme0EZ!pmzMHHkL$=chP=}0eK 
zWpXJ%HT!)~B&`6VOr_!+_nTRDeotZFoE=T+(R$(4i`j2BiwziH+iA>*vH_r}i&?Lx zBv^6C6qwVcsU#yRtEKI%nk=2mK_%E1vV{-xzNWe0$Ncq&4Lop^K#GeYIPhFr_f6k$ zNW)$Z^MVL=TxE1Y9TOo3;+EkY*NB-xNu|Xi8((WB*etJgsMr$C9{2_mS4*rkFsvC` z#}^LyC(c;(;bz|YrH?dR;T_4t{6vHlM2SHJ#X-A8N3cu|?F-WHmxIwCo=Bm~8ixx8 zLefXPo=~J1SHY4yf{*^mj^HEJKNjhWkZZ#T6C96tn;~R>o24e~AS586O35}8D4+q$&a!7r z1Q(W0eK$X2VF(4i1(cFC81Wcn@_>#OmIgS-^n>muMAM-yU>=Q$rXI6a)&hx+%*h8m z`5*3pG87X8hmDMXK^8!ph|fWC-6Sp?C~f{1`I3vEtX8J{ITSXEk_~c6D*;R`wN!Pk z770i>Mgl+6pvo~$o6yo4DHNJJDOj-XR5r%BA$Uv3Td3TMR^_+Azh&%6PzDEKULW9Ln5>ZAEQb&aqt`yonC)Uvw#6qEal$?EozM3#fjJ*gb|2 z@YpiNkO*0t)d4NtbgQG;U;J}95VKA~mRwW>?HADo@QtF9Ssf>Z{VjeJ(;J)vq+Bjr z+7pWzGU`8Sm#p}Ka+yk0?l-_U$epxNi^53@c7EDH&s5{y;Zb~={nL%!eAxt=4dV$2 ze`*BibLyBVvPB^Yiee7ERVHRa*h{iqAoQgT9=3L3)QN{|Tg9dI3KiwEi^@h=J}1g* z_`*NP_(p1D1bJ5j<52#BkY+vt0Ol6(DHVW&Hx$p6!)t6T9##%AI2DPHEX*(l7@Y5# ziBAajbd%e72wlS<2(cUpi0{XUf#UgpTovTd;Ivl1?-_`6kQB`c3uF^}@$%J}$`RPa z*h0Ys?p{hUSW%4oaY4Ub^5*iD9-Wu}1Gce`F|MqA`68iM_W5RGm@-|Az}oly%ceWT zI%~;*6*QS2O+5vay&619eJXTdWrhP#8Ni1v(5qY8l-7oqyelh<3a!_n2V)7uxbldf zk?7PJhvRXX$LZ3|AH}2y$x}k}fGht>>1nDN%sKh!%L+4N|96GT_;`8MVCx&~Cpzgs z4{@2!`TJ-#oh$YtiS)yNe+eR+V!vccg?+@xy;~yX(MuwRx$nz>egXekD)ZQhS?7x( zWPV#N6iVCvnO|Q!Fu$yhd~+%cgP!fjS2LN#wgZ{72cL6bzkpB#%wigymay@ipJK59 zz52kQZj5u{&>0D->vyV>GKk?iA29Fk>6d8%^dbz*8KS>gEfqREbb4oR?vUV&c$dXt zF@WY zGTuWC$;*%dyAB;CeCE~hjzluruWiqCagzpba3afXxWy(na=My9Ii|N|1>L4WqMiiT zbMG#KI8i!S^XFG^l$`8QPWri7{)ymNsmF9tO^L@NOX(mEx+Y;WH>D zr4b$RcAZt#NOo{~T$;0cUR!@7f@g~*;sMdh8i?p1_ERxY$TV`Z{$Cjr2 zsYVmBX}}cW$_a+Ts>*<6jU*7Q(S#`4TNR4Xwh=cv>BHMySfWIRLwWflQQi1WEsF_7Jx46TbIIc|H39f;U6pO$eCf z@GT+UNkcx~CmO)=VAKXevtuU`@aWirz+n*L1p8GCBF8M|kZT(gV1lvr0nYB;{829% zGHu9FIm&aCjmf+{xLQHyxv=| zDfaRQ{UrAX#%DE%@tHVCG?qjtkqxnP7?@GuhGXV|Nuy5zXj8|!w1^O?6u~^q2z8xj z!2&j3KF!9uz^$TTCPZ9V7z*~l%S&u4ImnbM;}gi4zK(;U0c8MQm7M_Rwxjv#YGGjC zk|868q=5=@H03c7vY>#*GoGUt39%hi0^qtwK@pItMaeZ7-JKs1PuwrT0p%d9;k|^C z`Z`r`!*wotX6aVt30PzcL?^4MN&Lv1ZLFP-4m&)~;WEqRR{>xahTbzYT~(wLMT% 
z2s11YYJAOJ*%Y6Q?2{hb89X9qTP?#Y+Kpelx3M7U4JO0#TMRa>|7S$-wx!TII?{ra zaB2Yl%B0{03hhQq!KMnP3^w$DiHwG8zs`c|n)e3?|3WPflq~7p^+0j?c8X4RD2la6 zIXu}##EJI~D%(50pn9t5fSv^O-&$?CXG_%kqzs?Ig6@{oo*9<>` zs-dL!E}#I4dLT}PLhiKk6y>C6$WL1mN}Eh&59~44k2o_W<4#I- zXT%C(M$pw#oa!=kkckUQDFFzNm%|hZ^M=d&Jybm}RQ4x~Kh&%6L1L`ItdpVxG15A( zE{{CxT|Rekv+WQ)+bO%(bg}dyS5Qi2%exO3gkZxgVYu8&+8#E`+AsW-Wy`p%G{i^8 z94}0tua9=c=3@@G=G{;H1_-U3woHN{ftC(-XIjiw$M`fb#;0Y(NrJaW(CD?v#)lsN^F!iA8(_xiPTSI4=SO(>kBp+^1a6 zbBu7mt^*W_3UMymKcO23_6_AAJP?ubM>_yy`EhQ3gv6lJ{ z-6@)GFCwT>wr5XVsEU)ZiklQI z4gl+HQQ^yi%3`(Dpruie;H0;fj?8LJ@cWMib5JTtO)v;dyaL40hRlpKaWC^QZGz^e zfmY5bDUwudVD!*--e@wcpKN@z$vT)aEW0l5Ex%U!hf$FZi*$ESRzV)Y#G z%MKtxl6?<}yt5}p&t-vQ5NT2Ck8@KrEE34hGi`UjGeL;LW6vMM7gs;@eg8%xTc66m z0y)Wzo&k5Ch-L*qLGczu>@|?;H@gq^rwd%-^kmUE&K8Z3|PWA{t(mYipswrFGlA0{N9vWHSK?wMw6Oh&nb~4*fCeOb> zYyvmuk^-!&?sIc;ILlu|^}sBQSxvq)=OM}OA6s?5p+)xNzDjvdp)O~j`o^O_0^4D} z#imx42SmBtRaN%t<|B#pkn^OR4(Qb3fB{8wFgBxc@;RJ=nEpEPR{MpL&+x zOQOM|qgHQ2OfZieHnnRQ#AiX?{UdmXWXOE;ueU$!M=P*6c1Wf_8X2$1ki7+`uu)J} zQU3DSYCjjBmL<>9X0#}Kp6bmv$6e#Pg$DpRVlMsjq2oL$VAA5AELg(&JJndv$#g;J zuLg?*e&U{OVEUUi`vZIkAd3X3LVx1O>9LT*bd88vFE9d zP0bl0!XxLsW1hgX-}IRm?0$6%6#9Io8%I6cz!pNUkW4)Y7`DEU$*G={u&QNIeqYCA zWR9)>b^yyy`L37rdCF0U2Z(#`mU-XxS=$K>LGD)sW@mZH)NsQP6Ftq5VOp}2>~bi_`i!Mlo}=-E@Lrppy@S%wWK*oobesS} z8a%{)*BuY+q`w#!_BDcQ+Qw*k^N<0wpcaZ`LGL>wh6*BV{sOk5g`2O|O`RuhF zFKuW6H89FO|B-ij19;iT?_hhh+4sx1Y_t1nlxg^%dJ z#^g%!eX7rV5j`#^I2OxEz>fH#GSvpj^z%>jEUenyF>#WW4?xY3=VGjlbI69T)i5so|b)!jQx_X5OZx z`GUFrku=RCxhh(S7NZl6&Sak;?r?9?nl)*(KtVR%Bc_w&q)Cv9yki^$t`1>f-LBu;J}3v9zzZ@ixm2S56iHfqM`0r`7Legk$l`A-g?f2|64V@U&}uT8 zT)Pr`_U#U$63tlrP^qnvdp3tCH%{GR zU^=z`X@xisC%Nor_l;hhV1RV&6i~8j=uA1JztbwF_aVS|m=)WT*0-n0bfqJxm{i&HvN-w*(df=b_NevRXVm8xyk$ zwGr|5h7~+&tm75!p5|cd)DVR)Yx-l$pa7RPNWn;PGs>Mh%3GNiMi5xu4)1q@l0d8F z^*RoR+v)l*g$09x9y`<3cK7QXHBVc_NvT-j>fP_G*zvcA z@}K@KMiES4ZLdd-h(;HcV|X_JE`oM<;u_ZN1=;J$76PN}&@ZwIa-P`q=th4gtl?sI zeV^uYIpaIJ=~M005zMHglEl4sa}fpwDWy+1Hi#rhw&_UAg`1cZks7bibY{EVU-b8) 
zITwp4vO6sB=^cs%*h48s&u?F|y(OWEa&CYYr-ThO>>R8Kf`6=fM2MTP>SDGQo45M3 z`&;lbZAo5wHK@$+=EbAwzN_7JbpTA!!V^}hiyL}NTGLrC%279VX~1HCOZe*_x%sFq z3mhgHqO~s(gEAe`IRwp@gSd!#Tf|q3sb@!j-yo8+>_;YM=x@Hp`R?=T&_$oX73(PQ z+2#7>J=^4skYMNX*fV-f+86|NJLZGJ=l;OxyQ_98G_^ZBE# zZQWFL`nCSvA~GktuvT*Bh<&JE`cANwQpN=xrL&j%>X1aWP0~jPVo^w%;<)|DLz>By z7UoXL?snR*&W(^<-TCa%h>L=de#;_19D6;YIFyX`iC%I>A@z~Y|GlE`*`RPR#mQ5^ z8obYigSC;o&+46&I}IG5Sq&sMtNrRhWw1ii%yse{a_A9N2psb>T)tn6+@0_%f2ayj z(+OXD!^?{m^!f=p(p%8U-YysYpfG9KY%kKc_F17S4L6Nysblxw z`469a>bnkJN#$`^9u{4ch-#(BPOtGRe!R8ckWLQWeqHEJIc!@k5G;%E&fRgqmBUY; zIau!TPth9=*3Q2T(Jm$M_&rn)44HaP!pP_6`S`ECe|F3h#b=rP<>1)ydsijbVO8dQ zh^z@ZMOjQq45^1IW`|u2%AYLX%A&F{+i{eV*{F*zgdQLc94E~Dy%vxFaV_BCNsh~@O2YSJ^`@F)T5W?>{aG8PQYLaOo6}1{R z#_<-&9U`mHE zAV4WhYh~n^joW*e_w7J|PzrMZU!*MAA>q&A(6{k={W2qHpcq^OHjK05KW6@cYL73h zHp>x~mi3gY!A-S%I1IG5{!;a4P73%Dul|`zY89Uj!7yZr&MYg?I zf~#QWi=+U;S?1>D{ByrtNdwglWr5U+TK#UAEBC2fF)nXLf`ebvit3+@hK`ImtG~mI zGsEJffl?CX=T3dO7^r3LLLJPpYLJ1Ksp8;cZUqTCI9uggT{fwAAOp9L|B}T3yaI-R z2;w5#A_hTty}G3?J*AjQX1WDQJn=Au5II`XR&smdtBo41@p9|Us(QetbXh1xb2M5#^ZOG$*&`p7 zgXp-F4$I!^7eprCaot#LupM-MhD1Q{J&6%-|n#bmK_H2X8{^NM5F*C&U(%M9+dhct3Uo|cfaU^ zP(UEM*R%bC*a!f5|0R5qsHRkFlueN{>HWvC3}G+$0r3x`19ld;Z}hKisCk6auc5f~{eGA=C|!GG%wcQ0e9qKwm+&>bc)C`kJTZtLL6(wjnmc~Q2RpF&|; z*L9_+!volU`hX7)LgRhj5_y)0A{KbhW&LX^=JyiFz00xiY<|?0m;E%d-xegCI9-dH zcmYVeKd*|-X+aAdOQ5EUo_ap*jp2}GE@Ils3nlsTZJ9t^A7Jv!X#i^q>whc-&?G;mkJwRlZ4t!GZ}{AmPiQR)%?d!A&#?fX(jHZ!1E41aVCmO0Q7hd#CI zaT-9(J%b4MkyxyEyTKxkT+V8V0ZYe^T4v#oZFM=kPOnF6B)87jG>bJ8K zn9u52Op5YJJ2@IxBaGE>-n`nCYU-A4!r`{&E6y5y}(x1tA&IeCqwB4_8n9469=M_&^+7^{ zxe-KWcDoBBhj#E&%;wHNWB%LnE0~37ab+`9xMqkSR~h!nz{og9gytl>@c) z_lR?ZE#pMyzfAS6cv>8>ey~jS35Qstm6ZCn&n(S7CF)ww3FRbslymZ-)nh(?AJ#r- z!ASiqluhu`CEb<6+j3|(8U;O6tTZV4@gHcYtkU!}XqTG4Ffn|2dpX~B{!B~~}$46X%oWCNWK^fZS8XVrTw=?(yfglNKbz3b! 
z?0)+{zou}I_Nd1oBJX8;R?Rbs3!Q-cl*Ziw&qwDtfj7{pr)0-cS(n075MjB+Zi=(Z^UoseR-z6^*w)#q#*p-~w$BsED)pTZV`A#JYQjw+jvoAj@F zf@G_OQ;dSl)=K(7(J=u z+o9FoEYdFvjqQmblq~@kpbR*TQ_E|}PQ|PTc-nm* z`H&cX-=bfo&cn%8$WZG#6a5@ImVWA2$5s7eU89!Uhs5s#x;y-9Crv}-`yv6+BUkAQ z$^}y-5+fz%>7%e>-8v5$2mIY$vg>39hDZ_t{rZXzwakWs6n-qG{*>hKqz!Lq%(YcW z$9fX!<$h-{Q6lT;*35Eqh%FT(U$w1caeeBXmlzGBJ}~}r#;{}7%)D8;X<`B58<$gu zX2&nWIQ^-Eb}v;3a^uGe$mk!`ao>Uh7&97e+fQBDp3|H@JFJcqExG%r0X|A$`QHlp z%&+MZa`g%op8P8XW^wt1l6$9?z4ipw=yh;1yD`wUKizGYh&+{f({6z(gib;P38{&9Y@=3&yoTp^%&Bb6*eIXb+{2ohZ?C#`TIz= zLBX*=!02e=o(HGkNY>k#g_L5ggy-8qk;U#o6QRJ=Z}BgKDs$b3+EN>W^Wm|eXI$&?I2ySy1^t_uTSl01kpZjRDRH;5;{;DDtB_jmg_kz31xbO8`mIjM;X{ zBg1s=;W8U!3Y4C#ZltgrS%1vHU0_R7GkS@Ik=6szAQHvoZNvCtr-8&ei)-}SCV9c6 z?GtF3@IlAZ$g5vVYz=muH53=#oSEFZ>-cLYy8E~$u3Nhv)=zi0AqbJ3;Q?asa``q3 zZS}JFGwkl)X5R;%f`$_h;=n%=siM9vM{|l6lx4&8N*b2mB62V$d4OJx8d#$x8!&Er zGd8PUqlqPBCW8A(F04ema5U${^Pcas^#0d7eU}^1^eig5(d;Lfm?R z;FRahM8O~FgA|PFuMiZSGmYS8Jk)AfUbJK?b|GMqM(&KJETK`wYV~;#7#eVZh(sJA z2&1euNNf2n-wBw|%^5MsTuT;~v>#Sp`3CRcRXTUTk<89T&_T<|{FwnbI^5T<)JX58 zEh(n$?v2X=67KT+{77xQ#Nf$&d7bv-YTk3HwZgNzQewe!g}Thh<5fTa^uBi~zO~%f zsX=$342Vn=cm++~`sdq;@EM4pOWV?FjJ}xk!IBvK+-RubFcjXvECp()7W|ORCDP~9 zm(xJs;9SrZ{es|?dD%={F7<^&y@G^}LM0_vN*gIN4ARk2d4{H4OL`Zps*<4iHjF|S zr0);x<~g%&n|dZNp3=PkZk@HxgFG>^Tm|k6{q~9MAy%W z)M=F(y8$ZD04blR9r8NeM~&9WU<0-IGejg8YGmsGG3{C+CqQ~2u9ybtca6gkjBpis zTpcbPB~UJ7{CytoIznRKxhLQZ0EKDm&>g5Xurn__htl-DXa3;YS2)`kr&)!%cmu@4 z$=_!8Pk>^Nc?!o|f11>;{Ve1=6f~E@el;6rPu9-H4J>oEtn8;cTnzDm_T}Ec7=Ofe zDA+`|m_PjRe>20Z>%Nn8$mSzKL(?W6JkR)=uGc8-E_2oQulE_aOK1(rV!QEno2hnM zybe4Ca~cWgvA7TX$y;@+wXc(V#mN#ydHGz9Q-9T)#ip_u1bjxmwZ7k`OWkqu$Mi`$ z%~CL$p_Fd6#=jy& z&P%ReJ0AnqIaw8dgQlYqC%q9) zT}nS}<;&XyKzhBIVWOP0R|;d$kTs8Gk0ka*hpj=H0&QbP*C&f|wbrPG2uX~v)5X&Q z`q)Bze*@`P#v# zN1C$K?MPRdb}{fbBkx6N-eox=i-C%obH3eL$3(T^Q^lkMx2`-6BOYn6tm7N5_^Ho# zsJ#aH#iBRTfyp!nxa6cOTOP91SWjdhDT2`NaS6;WdAwP4YiWE=!Nr70^UbWCh#1{d zH=2)rCy|GU|Ub(G(fADX;>tDfyAJ@<7a zs@>hzxDRZ!6wkUwy6VZG025{QthUx1pBfYxn(5};v^{QdF5iYJw)m&%Y*}JLB9Mdf 
zcZm={{Hsvl;JB;;I`pG!`x-m+Q#}pLWtk?}D2laM502Ys-DIQFCv00N@UhfF$^Dy= zsx`EgaNQ+0C-ZTsn~60wSx*37^DF7sOM|vIqh6U)5`4GdHda2HTkem#FTvk1CcNlf z3^D^99p2>^_oIr!Yyo{7Vo~Bn0|zuIm3L_ycB{nvS=Q)X4q~&opS-VrwowPqeQeqG zRSjHn+i=;iQf?Y`9BnO1a?(C+&OC#`Z-2U8+7gP~Dq@lnGwF_@sHb6|Yp>+POPqG= zPL&NjH1kf*%aYdW`>cNx*>qE( zikYF^oo31OU4|JBzkG*^Nd23G{GF_g-=2S3OL|6!x8+TpIFSIKyNSE^%$pU-);YTr;2+;-1+Di8$+N@CZzF27oZ3`bMkn zU^{3wHSNv--ibmY9S;c)1@Dz^p%aX@(gyLtwR7J)TN9y9mX`5^J&uvbIMh+DATm_u zRB36OEYIh=z1aC6Dv>5pV~(Gui2pE%Dm9fNDkaU*!Z?}VWujEv`j;fQ?t_j%_@k#F zGxNUhtH{G~{SOx1`yvTAQ(#ZMew&k8&v`=5T7!AZ`=3w9o}i((4CVGD47rFB8*q zu3S)aJV_T+9VM=*c9A&ujFvT%J`L95ns$fIFdd5HJV$WM#rwMRMR@<`wSW*&iN7gG zd<2OG<}L@kpb%H0qFLCUya6yoM8QR*yI&|#e#Z86bUBk4;MrFledAq_D?~1j+)L9j z8cR6+GmSoYa{}thA0MGPTR50642oUgF)OnX=(o`IrXF5|q%9WI4BhVkGKwWm*;;QU zS^=G!3TU(43}3g$7qD>&BS}mbtg8?Oo20K_`A@DMo#PxQ z-@I28I%a%bhXfKYGcmD9HksrMg_7~5@%5S*@B+5Tu~KV@;Ch$qSq{hk@SzD^PThLp z$k4Fg1@op0tL?YWTlHgzEc|4%&6(6({2qRT6m8G!!jGci+q#Vg={r#WO!a`ClCCMu zp>$j@CXmobsE9d(g=NKiorULNib>6$h2j}HT@<0|W_rMRJofn()UFw6FusFMGRW_` z(ysUr3Bx_T>X|SS1RoTv^9Zman==-vPpe((Ih)@g)F^YY3WZaQAmzVaW^Ekh-=>481=I54`}L#Hx#JMjL-V zntgu;>4-{HTvO2|AtU$qe1dk&(Cy}Umh96q{b}WA->PDc;l)LTf08Z@vM)r52wxD_ z)dZpYwqv{T~pU}uSOZM$dapiM2 z$|Xp+`FOE>W9y&(`o@_}Ue9}ur+%mzX^(e1D-WkEZpPrm1mz_Mm*SSJ(c;`%j_>or zH+EU6lvhs{3cG6Sh$k{B!*W8yy7GVqU>1Wag(6KrAeiK|jK|RTYnyZJjBrn1J=st} z^^>I07Y|9Jx}}*0h~g9;(uG5$`uruo9{U-KA}0;s9*`QJ&89UUR*;s}9oNs~j;Tvo+u*6)^ZD%AzkxxK=-;0DL=`_7Z&06l{bz+x#A6vIZNKsr6cl>0 ztfZK_KO}F^!O!h-$oe$J;1A?J^LMo`kh9`6-Z4G&^CvKsPN<6gm;r&#Asa)_iyqiK zixQV-A>_)z#DR{16G7W7Klq(Fe3vg0ZCy-Bv%WoSEf&Hs>c?n;r=s5R@Fvj|8E9@% zh(}}_8k97n4ZP5Yh%Utu^xQ6+t|Uv;ekEDoCyYD*tZi#BNoX^V3Z8Flu?lk0wkLA9 zFn}=nEcYg|j82~EOEOj24{WlXqcZMnc6<+$T5|dH9>{~Jt%K+D=Pi|4%aiD$ti&6SA=~vZsmKA#0T2!?TuIcyf`bYBO(c(MmOg6snh8DJISQb#b%=5 zx8e}cG~n9}loN&S;R7I#Jt|L2D4Xyn6N}*~T)@lP>3e7N+XITFS;FESO#S&J&Ardt zzG|JYJwa_4SF*Fn0bQXNHFyTf~_}@Qy=oCjIvW;>_Q+fgo%$ts`PoA`* zOeSr0Vg#aV-#)`3bafmZSi&Dnxk7JN2Zi_99}}f?S!KQ 
zt-Z;R`hnV6w4lYB4hxhuWnB6G&Rx^-o1VB@Za4lrX*3gB&86|gUzsh6#MsyZ2dOkz zU4;0Ijvj>ozB{;$_4Gyn#?5UXs;_T?%L!fK2Vz&~Jvj|cj)lTof>9C7`gA1@qh!vI zXp2@@H&(-p;VdoJnY5LaBT+Pyn<$-DiTY+pZnwfhM!QvZBdvz34*(!FM1o1G2&C9a z%@Gi`W04(y&_SAd^bd7fi=hFYFKhJ9et+G;yUtH-0=OorjRaW;99bWhI+wj~R-Auf zodjdqFVYC|i3lYov>M+q%dhH74)mela=1)|-;6$mb!QPI^l`LG$o$dPVI_^WZ7YDY z_s3H#zJd)aQJUBC1~z(>V6E7)6gvnsivK!`LxCyBf)DmHQz851aaR$0C($)Q;^@Zm zh%Vk@k25KjM8K;O^(}*GC%3>!_`X3^)z$bs`s7y;hy}yL=J5l6W4O}o>0fu`z1V5W zx5B@=Bb8XgyJi2q`h7|2fimhu2KT&flF%Wh*=|MaWVwczvcs zkQgs1HnIbdHi=Tx*^w>{L+4hqZd!j*p}gTf%iznkN9& zrV5w&V;mzY<2d4S;EsR$J9`e3Z$Dx9hc781eP-=W62Ja1G8Ag5TnG(>aNbem{p96H zBS$qOQ^G-8LC0eM0hX!z(>_50cZ9Xb)bP7ewpC6ds1Sz6lDc0wl9R?G_ohRog9dr8u@8*VH869Q&ei zz&I4HDzJjqeJX^gd=_U@)~0(>Kz|KR%Fr&=!L+K7i*QxqtIoabMYLx(SxZ_{#7E>ZR0V-gZ!aT?e7xKwgyaj zd!I_W6rzHqmwD7P=rku)KepQjy5w3(N)Rxj#i-K=)?2v2rV}HNNNAE67ok+jPKL4D zFyug8Y1J3_+1{--@Ihx%H^F5%9&VwajtG~!I=y2ktWnx&ONkyRo0U8f`W>zsuZ*vA zms~#WvOT~lRQ7Sbm5y&(HhtE8CQ_sB{rsr#aA~1leL{P-#-pkB15Le5>E8l)lfV4t zx-ck1rbar8Q)D4x^#=W^AN%tj=a9QZIWlLvl;6_v6uIxlH}hqZ3cM>48WoLLOD0V? zUz+8+YRU?|GPgi8?{%ZyBt79nQM^N+${mx&t&|h5uH>u0BwWh;&ijMlJp-0!ekD$P z(y;Xx+>AOUqpWZ2Azn^j(uqa4n=cwTq?~$Rrm6eLAHTM_jYED8>z3h_=c3q}b~O^q zCH_<_w!psLQJ=G2xm{G)Gh!h=$`wn6%A6D`G~FSux_Xnd{X0Aw&48@cIe(vs`@33M zdn;j&drHW8Va6^LUfl-Q@KmwT&~IIq>IP@pT6hyz>-u4LZPr+Llu6R0_oS$JH+}0_ z+p%EF&NM_i0 zPr3?SQ)BXCg4@^7P)FLYev~_Lng3{~Q2$ul{oBDjt(}L1BK`&bx5PY+QqyE=Xo8~g z5&me`dH7|f9bE~IQHt3N<-{~u_@Cv3$v;qJv%ekPH1L>I|B7Crr`2SWkY|GT2F2Dx z&~WAJri(@bUXWZR#Hmpp$=cDvzY&DByE+#aY_{;~{Kq?y%8uL3$TOjm6;>1zb&v?G z2FyGaVz5~!^qmPHjw$+`-}E4}sskO}se;KHLDiz}`9`!(h`Vi6`@`Jr%8(PR3ka=<*l-z%_>wI!) 
zXOw*EhbQe}d+WSp(;OW=hpKczi)fwJ)ozkSI8(OB zM<-iZxy8=U@_&N1W+5F?z5*tZk~QP8b&}RCH{Iy04z22sbsrGU`~;0{TTQKc-`L@V z{QL@{st*v3ZnR~F{gAo{^=E zj*Udgm?W<&c@@`Xnk^JJ6%Hwxeg$`8z}d1=970rzfV8LZ0BBcQ%P3b6lL{G02X2sJ zVPIbr!JDv`kH{-CAQ@8qkTjWcg-agUGDB>Tf(oopiM{uqZ=rFW+vp`J-jVP0A4&%nW zRXAv&*L4Y9z#|kx(jRVqmI;y)0zMxdqY{|zIFy7VNxXeL!3#8>DRMv>H3avnw}H&U@+tSV5yX`!V)Gy#;hSPw5B7s7j0UC;CtakRpwNlx#dELxlYF&v$(Ve$BzDsPKP(jZ7YfAW^GaD({~!2>F;OA1oK$=ycK- zx&MBN%%2I-T-me?C-}?%j=cjmMJDMFatvTP|NU4URjycCk@jo3fBzErN{JGpfptW> z^kx74^WbCu_l5p%hW_sz`e(uX-zxk6aUWXmj3O2;;>vf$2Dm~fSt%vSDslaQ{}1ac Bg^B - - - - - - - - - - - - - - - - -Regression Discontinuity Design (RDD) with stochtree - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - - - - -
-

Introduction

-

We study conditional average treatment effect (CATE) estimation for -regression discontinuity designs (RDD), in which treatment assignment is -based on whether a particular covariate — referred to as the running -variable — lies above or below a known value, referred to as the cutoff -value. Because treatment is deterministically assigned as a known -function of the running variable, RDDs are trivially deconfounded: -treatment assignment is independent of the outcome variable, given the -running variable (because treatment is conditionally constant). However, -estimation of treatment effects in RDDs is more complicated than simply -controlling for the running variable, because doing so introduces a -complete lack of overlap, which is the other key condition needed to -justify regression adjustment for causal inference. Nonetheless, the -CATE at the cutoff, \(X=c\), -may still be identified provided the conditional expectation \(E[Y \mid X,W]\) is continuous at that point -for all \(W=w\). We exploit -this assumption with the leaf regression BART model implemented in -Stochtree, which allows us to define an explicit prior on the CATE. We -now describe the RDD setup and our model in more detail, and provide -code to implement our approach.

-
-
-

Regression Discontinuity Design

-

We conceptualize the treatment effect estimation problem via a -quartet of random variables \((Y, X, Z, -U)\). The variable \(Y\) is the -outcome variable; the variable \(X\) is -the running variable; the variable \(Z\) is the treatment assignment indicator -variable; and the variable \(U\) -represents additional, possibly unobserved, causal factors. What -specifically makes this correspond to an RDD is that we stipulate that -\(Z = I(X > c)\), for cutoff \(c\). We assume \(c = 0\) without loss of generality.

-

The following figure depicts a causal diagram representing the -assumed causal relationships between these variables. Two key features -of this diagram are one, that \(X\) -blocks the impact of \(U\) on \(Z\): in other words, \(X\) satisfies the back-door criterion for -learning causal effects of \(Z\) on -\(Y\). And two, \(X\) and \(U\) are not descendants of \(Z\).

-
-A causal directed acyclic graph representing the general structure of a regression discontinuity design problem -

-A causal directed acyclic graph representing the general structure of a -regression discontinuity design problem -

-
-

Using this causal diagram, we may express \(Y\) as some function of its graph parents, -the random variables \((X,Z,U)\): \[Y = F(X,Z,U).\] In principle, we may -obtain draws of \(Y\) by first drawing -\((X,Z,U)\) according to their joint -distribution and then applying the function \(F\). Similarly, we may relate this -formulation to the potential outcomes framework straightforwardly: \[\begin{equation} -\begin{split} -Y^1 &= F(X,1,U),\\ -Y^0 &= F(X,0,U). -\end{split} -\end{equation}\] Here, draws of \((Y^1, -Y^0)\) may be obtained (in principle) by drawing \((X,Z,U)\) from their joint distribution and -using only the \((X,U)\) elements as -arguments in the above two equations, ``discarding’’ the drawn value of -\(Z\). Note that this construction -implies the consistency condition: \(Y = Y^1 Z + Y^0 ( 1 - Z)\). Likewise, this -construction implies the no interference condition because each -\(Y_i\) is considered to be produced -with arguments (\(X_i, Z_i, U_i)\) and -not those from other units \(j\); in -particular, in constructing \(Y_i\), -\(F\) does not take \(Z_j\) for \(j -\neq i\) as an argument.

-

Next, we define the following conditional expectations \[\begin{equation} -\begin{split} -\mu_1(x) &= E[ F(x, 1, U) \mid X = x] ,\\ -\mu_0(x) &= E[ F(x, 0, U) \mid X = x], -\end{split} -\end{equation}\] with which we can define the treatment effect -function \[\tau(x) = \mu_1(x) - -\mu_0(x).\] Because \(X\) -satisfies the back-door criterion, \(\mu_1\) and \(\mu_0\) are estimable from the data, -meaning that \[\begin{equation} -\begin{split} -\mu_1(x) &= E[ F(x, 1, U) \mid X = x] = E[Y \mid X=x, Z=1],\\ -\mu_0(x) &= E[ F(x, 0, U) \mid X = x] = E[Y \mid X=x, Z=0], -\end{split} -\end{equation}\]
-the right-hand-sides of which can be estimated from sample data, which -we supposed to be independent and identically distributed realizations -of \((Y_i, X_i, Z_i)\) for \(i = 1, \dots, n\). However, because \(Z = I(X >0)\) we can in fact only learn -\(\mu_1(x)\) for \(X > 0\) and \(\mu_0(x)\) for \(X < 0\). In potential outcomes -terminology, conditioning on \(X\) -satisfies ignorability, \[(Y^1, Y^0) \perp -\!\!\! \perp Z \mid X,\] but not strong ignorability, -because overlap is violated. Overlap would require that \[0 < P(Z = 1 \mid X=x) < 1 \;\;\;\; \forall -x,\] which is clearly violated by the RDD assumption that \(Z = I(X > 0)\). Consequently, the -overall ATE, \(\bar{\tau} = -E(\tau(X)),\) is unidentified, and we must content ourselves with -estimating \(\tau(0)\), the conditional -average effect at the point \(x = 0\), -which we estimate as the difference between \(\mu_1(0) - \mu_0(0)\). This is possible for -continuous \(X\) so long as one is -willing to assume that \(\mu_1(x)\) and -\(\mu_0(x)\) are both suitably smooth -functions of \(x\): any inferred -discontinuity at \(x = 0\) must -therefore be attributable to treatment effect.

-
-

Conditional average treatment effects in RDD

-

We are concerned with learning not only \(\tau(0)\), the “RDD ATE” (e.g. the CATE at -\(x = 0\)), but also RDD CATEs, \(\tau(0, \mathrm{w})\) for some covariate -vector \(\mathrm{w}\). Incorporating -additional covariates in the above framework turns out to be -straightforward, simply by defining \(W = -\varphi(U)\) to be an observable function of the (possibly -unobservable) causal factors \(U\). We -may then define our potential outcome means as \[\begin{equation} -\begin{split} -\mu_1(x,\mathrm{w}) &= E[ F(x, 1, U) \mid X = x, W = \mathrm{w}] = -E[Y \mid X=x, W=\mathrm{w}, Z=1],\\ -\mu_0(x,\mathrm{w}) &= E[ F(x, 0, U) \mid X = x, W = \mathrm{w}] = -E[Y \mid X=x, W = \mathrm{w}, Z=0], -\end{split} -\end{equation}\] and our treatment effect function as \[\tau(x,\mathrm{w}) = \mu_1(x,\mathrm{w}) - -\mu_0(x,\mathrm{w}).\] We consider our data to be independent and -identically distributed realizations \((Y_i, -X_i, Z_i, W_i)\) for \(i = 1, \dots, -n\). Furthermore, we must assume that \(\mu_1(x,\mathrm{w})\) and \(\mu_0(x,\mathrm{w})\) are suitably smooth -functions of \(x\), {} \(\mathrm{w}\); in other words, for each -value of \(\mathrm{w}\) the usual -continuity-based identification assumptions must hold.

-

With this framework and notation established, CATE estimation in RDDs -boils down to estimation of condition expectation functions \(E[Y \mid X=x, W=\mathrm{w}, Z=z]\), for -which we turn to BART models.

-
-
-
-

The BARDDT Model

-

We propose a BART model where the trees are allowed to split on \((x,\mathrm{w})\) but where each leaf node -parameter is a vector of regression coefficients tailored to the RDD -context (rather than a scalar constant as in default BART). In one -sense, such a model can be seen as implying distinct RDD ATE regressions -for each subgroup determined by a given tree; however, this intuition is -only heuristic, as the entire model is fit jointly as an ensemble of -such trees. Instead, we motivate this model as a way to estimate the -necessary conditional expectations via a parametrization where the -conditional treatment effect function can be explicitly regularized, as -follows.

-

Let \(\psi\) denote the following -basis vector: \[\begin{equation} -\psi(x,z) = \begin{bmatrix} -1 & z x & (1-z) x & z -\end{bmatrix}. -\end{equation}\] To generalize the original BART model, we define -\(g_j(x, \mathrm{w}, z)\) as a -piecewise linear function as follows. Let \(b_j(x, \mathrm{w})\) denote the node in the -\(j\)th tree which contains the point -\((x, \mathrm{w})\); then the -prediction function for tree \(j\) is -defined to be: \[\begin{equation} -g_j(x, \mathrm{w}, z) = \psi(x, z) \Gamma_{b_j(x, \mathrm{w})} -\end{equation}\]
-for a leaf-specific regression vector \(\Gamma_{b_j} = (\eta_{b_j}, \lambda_{b_j}, -\theta_{b_j}, \Delta_{b_j})^t\). Therefore, letting \(n_{b_j}\) denote the number of data points -allocated to node \(b\) in the \(j\)th tree and \(\Psi_{b_j}\) denote the \(n_{b_j} \times 4\) matrix, with rows equal -to \(\psi(x,z)\) for all \((x_i,z_i) \in b_j\), the model for -observations assigned to leaf \(b_j\), -can be expressed in matrix notation as: \[\begin{equation} -\begin{split} -\mathbf{Y}_{b_j} \mid \Gamma_{b_j}, \sigma^2 &\sim -\mathrm{N}(\Psi_{b_j} \Gamma_{b_j},\sigma^2)\\ -\Gamma_{b_j} &\sim \mathrm{N}(0, \Sigma_0), -\end{split} \label{eq:leaf.regression} -\end{equation}\] where we set \(\Sigma_0 = \frac{0.033}{J} \mbox{I}\) as a -default (for \(x\) vectors standardized -to have unit variance in-sample).

-

This choice of basis entails that the RDD CATE at \(\mathrm{w}\), \(\tau(0, \mathrm{w})\), is a sum of the -\(\Delta_{b_j(0, \mathrm{w})}\) -elements across all trees \(j = 1, \dots, -J\): \[\begin{equation} -\begin{split} -\tau(0, \mathrm{w}) &= E[Y^1 \mid X=0, W = \mathrm{w}] - E[Y^0 \mid -X = 0, W = \mathrm{w}]\\ -& = E[Y \mid X=0, W = \mathrm{w}, Z = 1] - E[Y \mid X = 0, W = -\mathrm{w}, Z = 0]\\ -&= \sum_{j = 1}^J g_j(0, \mathrm{w}, 1) - \sum_{j = 1}^J g_j(0, -\mathrm{w}, 0)\\ -&= \sum_{j = 1}^J \psi(0, 1) \Gamma_{b_j(0, \mathrm{w})} - \sum_{j -= 1}^J \psi(0, 0) \Gamma_{b_j(0, \mathrm{w})} \\ -& = \sum_{j = 1}^J \Bigl( \psi(0, 1) - \psi(0, 0) -\Bigr) \Gamma_{b_j(0, \mathrm{w})} \\ -& = \sum_{j = 1}^J \Bigl( (1,0,0,1) - -(1,0,0,0) \Bigr) \Gamma_{b_j(0, \mathrm{w})} \\ -&= \sum_{j=1}^J \Delta_{b_j(0, \mathrm{w})}. -\end{split} -\end{equation}\] As a result, the priors on the \(\Delta\) coefficients directly regularize -the treatment effect. We set the tree and error variance priors as in -the original BART model.

-

The following figures provide a graphical depiction of how the BARDDT -model fits a response surface and thereby estimates CATEs for distinct -values of \(\mathrm{w}\). For -simplicity only two trees are used in the illustration, while in -practice dozens or hundreds of trees may be used (in our simulations and -empirical example, we use 150 trees).

-
-Two regression trees with splits in x and a single scalar w. Node images depict the g(x,w,z) function (in x) defined by that node's coefficients. The vertical gap between the two line segments in a node that contain x=0 is that node's contribution to the CATE at X = 0. Note that only such nodes contribute for CATE prediction at x=0 -

-Two regression trees with splits in x and a single scalar w. Node images -depict the g(x,w,z) function (in x) defined by that node’s coefficients. -The vertical gap between the two line segments in a node that contain -x=0 is that node’s contribution to the CATE at X = 0. Note that only -such nodes contribute for CATE prediction at x=0 -

-
-
-The two top figures show the same two regression trees as in the preceding figure, now represented as a partition of the x-w plane. Labels in each partition correspond to the leaf nodes depicted in the previous picture. The bottom figure shows the partition of the x-w plane implied by the sum of the two trees; the red dashed line marks point W=w* and the combination of nodes that include this point -

-The two top figures show the same two regression trees as in the -preceding figure, now represented as a partition of the x-w plane. -Labels in each partition correspond to the leaf nodes depicted in the -previous picture. The bottom figure shows the partition of the x-w plane -implied by the sum of the two trees; the red dashed line marks point -W=w* and the combination of nodes that include this point -

-
-
-Left: The function fit at W = w* for the two trees shown in the previous two figures, shown superimposed. Right: The aggregated fit achieved by summing the contributes of two regression tree fits shown at left. The magnitude of the discontinuity at x = 0 (located at the dashed gray vertical line) represents the treatment effect at that point. Different values of w will produce distinct fits; for the two trees shown, there can be three distinct fits based on the value of w. -

-Left: The function fit at W = w* for the two trees shown in the previous -two figures, shown superimposed. Right: The aggregated fit achieved by -summing the contributes of two regression tree fits shown at left. The -magnitude of the discontinuity at x = 0 (located at the dashed gray -vertical line) represents the treatment effect at that point. Different -values of w will produce distinct fits; for the two trees shown, there -can be three distinct fits based on the value of w. -

-
-

An interesting property of BARDDT can be seen in this small -illustration — by letting the regression trees split on the running -variable, there is no need to separately define a ‘bandwidth’ as is used -in the polynomial approach to RDD. Instead, the regression trees -automatically determine (in the course of posterior sampling) when to -‘prune’ away regions away from the cutoff value. There are two notable -features of this approach. One, different trees in the ensemble are -effectively using different local bandwidths and these fits are then -blended together. For example, in the bottom panel of the second figure, -we obtain one bandwidth for the region \(d+i\), and a different one for regions -\(a+g\) and \(d+g\). Two, for cells in the tree partition -that do not span the cutoff, the regression within that partition -contains no causal contrasts — all observations either have \(Z = 1\) or \(Z = -0\). For those cells, the treatment effect coefficient is -ill-posed and in those cases the posterior sampling is effectively a -draw from the prior; however, such draws correspond to points where the -treatment effect is unidentified and none of these draws contribute to -the estimation of \(\tau(0, -\mathrm{w})\) — for example, only nodes \(a+g\), \(d+g\), and \(d+i\) provide any contribution. This -implies that draws of \(\Delta\) -corresponding to nodes not predicting at \(X=0\) will always be draws from the prior, -which has some intuitive appeal.

-
-
-

Demo

-

In this section, we provide code for implementing our model in -stochtree on a popular RDD dataset. First, let us load -stochtree and all the necessary libraries for our posterior -analysis.

-
## Load libraries
-library(stochtree)
-library(rpart)
-library(rpart.plot)
-library(xtable)
-library(foreach)
-library(doParallel)
-
## Loading required package: iterators
-
## Loading required package: parallel
-
-

Dataset

-

The data comes from Lindo, Sanders, and -Oreopoulos (2010), who analyze data on college students enrolled -in a large Canadian university in order to evaluate the effectiveness of -an academic probation policy. Students who present a grade point average -(GPA) lower than a certain threshold at the end of each term are placed -on academic probation and must improve their GPA in the subsequent term -or else face suspension. We are interested in how being put on probation -or not, \(Z\), affects students’ GPA, -\(Y\), at the end of the current term. -The running variable, \(X\), is the -negative distance between a student’s previous-term GPA and the -probation threshold, so that students placed on probation (\(Z = 1\)) have a positive score and the -cutoff is 0. Potential moderators, \(W\), are:

-
    -
  • gender (male),
  • -
  • age upon entering university (age_at_entry)
  • -
  • a dummy for being born in North America -(bpl_north_america),
  • -
  • the number of credits taken in the first year -(totcredits_year1)
  • -
  • an indicator designating each of three campuses -(loc_campus 1, 2 and 3), and
  • -
  • high school GPA as a quantile w.r.t the university’s incoming class -(hsgrade_pct).
  • -
-
## Load and organize data
-data <- read.csv("https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv")
-y <- data$nextGPA
-x <- data$X
-x <- x/sd(x) ## we always standardize X
-w <- data[,4:11]
-### Must define categorical features as ordered/unordered factors
-w$totcredits_year1 <- factor(w$totcredits_year1,ordered=TRUE)
-w$male <- factor(w$male,ordered=FALSE)
-w$bpl_north_america <- factor(w$bpl_north_america,ordered=FALSE)
-w$loc_campus1 <- factor(w$loc_campus1,ordered=FALSE)
-w$loc_campus2 <- factor(w$loc_campus2,ordered=FALSE)
-w$loc_campus3 <- factor(w$loc_campus3,ordered=FALSE)
-c <- 0
-n <- nrow(data)
-z <- as.numeric(x>c)
-h <- 0.1 ## window for prediction sample
-test <- -h < x & x < h
-ntest <- sum(test)
-
-
-

Target estimand

-

Generically, our estimand is the CATE function at \(x = 0\); i.e. \(\tau(0, \mathrm{w})\). The key practical -question is which values of \(\mathrm{w}\) to consider. Some values of -\(\mathrm{w}\) will not be -well-represented near \(x=0\) and so no -estimation technique will be able to estimate those points effectively. -As such, to focus on feasible points — which will lead to interesting -comparisons between methods — we recommend restricting the evaluation -points to the observed \(\mathrm{w}_i\) -such that \(|x_i| \leq \delta\), for -some \(\delta > 0\). In our example, -we use \(\delta = 0.1\) for a -standardized \(x\) variable. Therefore, -our estimand of interest is a vector of treatment effects: \[\begin{equation} -\tau(0, \mathrm{w}_i) \;\;\; \forall i \;\mbox{ such that }\; |x_i| \leq -\delta. -\end{equation}\]

-
-
-

Implementing BARDDT

-

In order to implement our model, we write the Psi vector, as defined -before: Psi <- cbind(z*x,(1-z)*x, z,rep(1,n)). The -training matrix for the model is as.matrix(cbind(x,w)), -which we feed into the stochtree::bart function via the -X_train parameter. The basis vector Psi is fed -into the function via the leaf_basis_train parameter. The -list object barddt.mean.parmlist defines options for the -mean forest (a different list can be defined for a variance forest in -the case of heteroscedastic BART, which we do not consider here). -Importantly, in this list we define parameter -sigma2_leaf_init = diag(rep(0.1/150,4)), which sets \(\Sigma_0\) as described above. Now, we can -fit the model, which is saved in object barddt.fit.

-

Once the model is fit, we need 3 elements to obtain the CATE -predictions: the basis vectors at the cutoff for \(z=1\) and \(z=0\), the test matrix \([X \quad W]\) at the cutoff, and the -testing sample. We define the prediction basis vectors \(\psi_1 = [1 \quad 0 \quad 0 \quad 1]\) and -\(\psi_0 = [1 \quad 0 \quad 0 \quad -0]\), which correspond to \(\psi\) at \((x=0,z=1)\), and \((x=0,z=0)\), respectively. These vectors -are written into R as -Psi1 <- cbind(rep(1,n), rep(c,n), rep(0,n), rep(1,n)) -and -Psi0 <- cbind(rep(1,n), rep(0,n), rep(c,n), rep(0,n)). -Then, we write the test matrix at \((x=0,\mathrm{w})\) as -xmat_test <- as.matrix(cbind(rep(0,n),w). Finally, we -must define the testing window. As discussed previously, our window is -set such that \(|x| \leq 0.1\), which -can be set in R as -test <- -0.1 < x & x <0.1.

-

Once all of these elements are set, we can obtain the outcome -predictions at the cutoff by running -predict(barddt.fit, xmat_test, Psi1) (resp. -predict(barddt.fit, xmat_test, Psi0)). Each of these calls -returns a list, from which we can extract element y_hat to -obtain the posterior distribution for the outcome. In the code below, -the treated and control outcome predictions are saved in the matrix -objects pred1 and pred0, respectively. Now, we -can obtain draws from the CATE posterior by simply subtracting these -matrices. The function below outlines how to perform each of these steps -in R.

-
fit.barddt <- function(y,x,w,z,test,c)
-{
-  ## Lists of parameters for the Stochtree BART function
-  barddt.global.parmlist <- list(standardize=T,sample_sigma_global=TRUE,sigma2_global_init=0.1)
-  barddt.mean.parmlist <- list(num_trees=50, min_samples_leaf=20, alpha=0.95, beta=2,
-                               max_depth=20, sample_sigma2_leaf=FALSE, sigma2_leaf_init = diag(rep(0.1/150,4)))
-  ## Set basis vector for leaf regressions
-  Psi <- cbind(rep(1,n),z*x,(1-z)*x,z)
-  ## Model fit
-  barddt.fit = stochtree::bart(X_train= as.matrix(cbind(x,w)), y_train=y,
-                               leaf_basis_train = Psi, mean_forest_params=barddt.mean.parmlist,
-                               general_params=barddt.global.parmlist,
-                               num_mcmc=1000,num_gfr=30)
-  ## Define basis vectors and test matrix for outcome predictions at X=c
-  Psi1 <- cbind(rep(1,n), rep(c,n), rep(0,n), rep(1,n))
-  Psi0 <- cbind(rep(1,n), rep(0,n), rep(c,n), rep(0,n))
-  Psi1 <- Psi1[test,]
-  Psi0 <- Psi0[test,]
-  xmat_test <- as.matrix(cbind(rep(0,n),w)[test,])
-  ## Obtain outcome predictions
-  pred1 <- predict(barddt.fit,xmat_test,Psi1)$y_hat
-  pred0 <- predict(barddt.fit,xmat_test,Psi0)$y_hat
-  ## Obtain CATE posterior
-  out <- pred1-pred0
-  return(out)
-}
-

Now, we proceed to fit the BARDDT model. The procedure is exactly the -same as described in the simulation section.

-
## We will sample multiple chains sequentially
-num_chains <- 20
-num_gfr <- 2
-num_burnin <- 0
-num_mcmc <- 500
-bart_models <- list()
-## Define basis functions for training and testing
-B <- cbind(z*x,(1-z)*x, z,rep(1,n))
-B1 <- cbind(rep(c,n), rep(0,n), rep(1,n), rep(1,n))
-B0 <- cbind(rep(0,n), rep(c,n), rep(0,n), rep(1,n))
-B1 <- B1[test,]
-B0 <- B0[test,]
-B_test <- rbind(B1,B0)
-xmat_test <- cbind(x=rep(0,n),w)[test,]
-xmat_test <- rbind(xmat_test,xmat_test)
-### We combine the basis for Z=1 and Z=0 to feed it to the BART call and get the Y(z) predictions instantaneously
-### Then we separate the posterior matrix between each Z and calculate the CATE prediction
-## Sampling trees in parallel
-ncores <- 5
-cl <- makeCluster(ncores)
-registerDoParallel(cl)
-
-start_time <- Sys.time()
-bart_model_outputs <- foreach (i = 1:num_chains) %dopar% {
-  random_seed <- i
-  ## Lists to define BARDDT parameters
-  barddt.global.parmlist <- list(standardize=T,sample_sigma_global=TRUE,sigma2_global_init=0.1)
-  barddt.mean.parmlist <- list(num_trees=50, min_samples_leaf=20, alpha=0.95, beta=2,
-                               max_depth=20, sample_sigma2_leaf=FALSE, sigma2_leaf_init = diag(rep(0.1/50,4)))
-  bart_model <- stochtree::bart(
-    X_train = cbind(x,w), leaf_basis_train = B, y_train = y, 
-    X_test = xmat_test, leaf_basis_test = B_test,
-    num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, 
-    general_params = barddt.global.parmlist, mean_forest_params = barddt.mean.parmlist
-  )
-  bart_model <- bart_model$y_hat_test[1:ntest,]-bart_model$y_hat_test[(ntest+1):(2*ntest),]
-}
-stopCluster(cl)
-## Combine CATE predictions
-pred <- do.call("cbind",bart_model_outputs)
-
-end_time <- Sys.time()
-
-print(end_time - start_time)
-
## Time difference of 9.554316 mins
-
## Save the results
-saveRDS(pred, "bart_rdd_posterior.rds")
-

We now proceed to analyze the CATE posterior. The figure produced -below presents a summary of the CATE posterior produced by BARDDT for -this application. This picture is produced fitting a regression tree, -using \(W\) as the predictors, to the -individual posterior mean CATEs: \[\begin{equation} -\bar{\tau}_i = \frac{1}{M} \sum_{h = 1}^M \tau^{(h)}(0, \mathrm{w}_i), -\end{equation}\] where \(h\) -indexes each of \(M\) total posterior -samples. As in our simulation studies, we restrict our posterior -analysis to use \(\mathrm{w}_i\) values -of observations with \(|x_i| \leq \delta = -0.1\) (after normalizing \(X\) -to have standard deviation 1 in-sample). For the Lindo, Sanders, and Oreopoulos (2010) data, this -means that BARDDT was trained on \(n = -40,582\) observations, of which 1,602 satisfy \(x_i \leq 0.1\), which were used to generate -the effect moderation tree.

-
## Fit regression tree
-cate <- rpart(y~.,data.frame(y=rowMeans(pred),w[test,]),control = rpart.control(cp=0.015))
-## Define separate colors for left and rightmost nodes
-plot.cart <- function(rpart.obj)
-{
-  rpart.frame <- rpart.obj$frame
-  left <- which.min(rpart.frame$yval)
-  right <- which.max(rpart.frame$yval)
-  nodes <- rep(NA,nrow(rpart.frame))
-  for (i in 1:length(nodes))
-  {
-    if (rpart.frame$yval[i]==rpart.frame$yval[right]) nodes[i] <- "gold2"
-    else if (rpart.frame$yval[i]==rpart.frame$yval[left]) nodes[i] <- "tomato3"
-    else nodes[i] <- "lightblue3"
-  }
-  return(nodes)
-}
-## Plot regression tree
-rpart.plot(cate,main="",box.col=plot.cart(cate))
-
-Regression tree fit to posterior point estimates of individual treatment effects: top number in each box is the average subgroup treatment effect, lower number shows the percentage of the total sample in that subgroup; the tree flags credits in first year, gender, and age at entry as important moderators. -

-Regression tree fit to posterior point estimates of individual treatment -effects: top number in each box is the average subgroup treatment -effect, lower number shows the percentage of the total sample in that -subgroup; the tree flags credits in first year, gender, and age at entry -as important moderators. -

-
-

The resulting effect moderation tree indicates that course load -(credits attempted) in the academic term leading to their probation is a -strong moderator. Contextually, this result is plausible, both because -course load could relate to latent character attributes that influence a -student’s responsiveness to sanctions and also because it could predict -course load in the current term, which would in turn have implications -for the GPA (i.e. it is harder to get a high GPA while taking more -credit hours). The tree also suggests that effects differ by campus, and -age and gender of the student. These findings are all prima facie -plausible as well.

-

To gauge how strong these findings are statistically, we can zoom in -on isolated subgroups and compare the posteriors of their subgroup -average treatment effects. This approach is valid because in fitting the -effect moderation tree to the posterior mean CATEs we in no way altered -the posterior itself; the effect moderation tree is a posterior summary -tool and not any additional inferential approach; the posterior is -obtained once and can be explored freely using a variety of techniques -without vitiating its statistical validity. Investigating the most -extreme differences is a good place to start: consider the two groups of -students at opposite ends of the treatment effect range discovered by -the effect moderation tree:

-
    -
  • Group A a male student that entered college older -than 19 and attempted more than 4.8 credits in the first year (leftmost -leaf node, colored red, comprising 128 individuals)
  • -
  • Group B a student of any gender who entered college -younger than 19 and attempted between 4.3 and 4.8 credits in the first -year (rightmost leaf node, colored gold, comprising 108 -individuals).
  • -
-

Subgroup CATEs are obtained by aggregating CATEs across the observed -\(\mathrm{w}_i\) values for individuals -in each group; this can be done for individual posterior samples, -yielding a posterior distribution over the subgroup CATE: \[\begin{equation} -\bar{\tau}_A^{(h)} = \frac{1}{n_A} \sum_{i : \mathrm{w}_i} \tau^{(h)}(0, -\mathrm{w}_i), -\end{equation}\] where \(h\) -indexes a posterior draw and \(n_A\) -denotes the number of individuals in the group A.

-

The code below produces a contour plot for a bivariate kernel density -estimate of the joint CATE posterior distribution for subgroups A and B. -The contour lines are nearly all above the \(45^{\circ}\) line, indicating that the -preponderance of posterior probability falls in the region where the -treatment effect for Group B is greater than that of Group A, meaning -that the difference in the subgroup treatment effects flagged by the -effect moderation tree persist even after accounting for estimation -uncertainty in the underlying CATE function.

-
## Define function to produce KD estimates of the joint distribution of two subgroups
-cate.kde <- function(rpart.obj,pred)
-{
-  rpart.frame <- rpart.obj$frame
-  left <- rpart.obj$where==which.min(rpart.frame$yval)
-  right <- rpart.obj$where==which.max(rpart.frame$yval)
-  ## Calculate CATE posterior for groups A and B
-  cate.a <- do.call("cbind",by(pred,left, colMeans))
-  cate.b <- do.call("cbind",by(pred,right, colMeans))
-  cate.a <- cate.a[,2]
-  cate.b <- cate.b[,2]
-  ## Estimate kernel density
-  denshat <- MASS::kde2d(cate.a, cate.b, n=200)
-  return(denshat)
-}
-contour(cate.kde(cate,pred),bty='n',xlab="Group A",ylab="Group B")
-abline(a=0,b=1)
-
-Kernel density estimates for the joint CATE posterior between male students who entered college older than 19 and attempted more than 4.8 credits in the first year (leftmost leaf node, red) and students who entered college younger than 19 and attempted between 4.3 and 4.8 credits in the first year (rightmost leaf node, gold) -

-Kernel density estimates for the joint CATE posterior between male -students who entered college older than 19 and attempted more than 4.8 -credits in the first year (leftmost leaf node, red) and students who -entered college younger than 19 and attempted between 4.3 and 4.8 -credits in the first year (rightmost leaf node, gold) -

-
-

As always, CATEs that vary with observable factors do not necessarily -represent a causal moderating relationship. Here, if the -treatment effect of academic probation is seen to vary with the number -of credits, that does not imply that this association is causal: -prescribing students to take a certain number of credits will not -necessarily lead to a more effective probation policy, it may simply be -that the type of student to naturally enroll for fewer credit hours is -more likely to be responsive to academic probation. An entirely distinct -set of causal assumptions are required to interpret the CATE variations -themselves as causal. All the same, uncovering these patterns of -treatment effect variability are crucial to suggesting causal mechanism -to be investigated in future studies.

-
-
-
-

References

-
-
-Lindo, Jason M, Nicholas J Sanders, and Philip Oreopoulos. 2010. -“Ability, Gender, and Performance Standards: Evidence from -Academic Probation.” American Economic Journal: Applied -Economics 2 (2): 95–117. -
-
-
- - - - -
- - - - - - - - - - - - - - - diff --git a/vignettes/R/RDD/rdd_vignette.Rmd b/vignettes/R/RDD/rdd_vignette.Rmd deleted file mode 100644 index 12ae6ca6c..000000000 --- a/vignettes/R/RDD/rdd_vignette.Rmd +++ /dev/null @@ -1,354 +0,0 @@ ---- -title: 'Regression Discontinuity Design (RDD) with stochtree' -author: - - Rafael Alcantara, University of Texas at Austin - - P. Richard Hahn, Arizona State University - - Drew Herren, University of Texas at Austin -date: "`r Sys.Date()`" -output: html_document -bibliography: rdd.bib ---- - -```{r setup, include=FALSE} -knitr::opts_chunk$set(echo = TRUE) -``` - -\usepackage{amsmath,asfonts,amssymb,amsthm} -\newcommand{\ind}{\perp \!\!\! \perp} -\newcommand{\B}{\mathcal{B}} -\newcommand{\res}{\mathbf{r}} -\newcommand{\m}{\mathbf{m}} -\newcommand{\x}{\mathbf{x}} -\newcommand{\C}{\mathbb{C}} -\newcommand{\N}{\mathrm{N}} -\newcommand{\w}{\mathrm{w}} -\newcommand{\iidsim}[0]{\stackrel{\mathrm{iid}}{\sim}} -\newcommand{\V}{ \mathbb{V}} -\newcommand{\f}{\mathrm{f}} -\newcommand{\F}{\mathbf{F}} -\newcommand{\Y}{\mathbf{Y}} - -## Introduction - -We study conditional average treatment effect (CATE) estimation for regression discontinuity designs (RDD), in which treatment assignment is based on whether a particular covariate --- referred to as the running variable --- lies above or below a known value, referred to as the cutoff value. Because treatment is deterministically assigned as a known function of the running variable, RDDs are trivially deconfounded: treatment assignment is independent of the outcome variable, given the running variable (because treatment is conditionally constant). However, estimation of treatment effects in RDDs is more complicated than simply controlling for the running variable, because doing so introduces a complete lack of overlap, which is the other key condition needed to justify regression adjustment for causal inference. 
Nonetheless, the CATE _at the cutoff_, $X=c$, may still be identified provided the conditional expectation $E[Y \mid X,W]$ is continuous at that point for _all_ $W=w$. We exploit this assumption with the leaf regression BART model implemented in Stochtree, which allows us to define an explicit prior on the CATE. We now describe the RDD setup and our model in more detail, and provide code to implement our approach. - -## Regression Discontinuity Design - -We conceptualize the treatment effect estimation problem via a quartet of random variables $(Y, X, Z, U)$. The variable $Y$ is the outcome variable; the variable $X$ is the running variable; the variable $Z$ is the treatment assignment indicator variable; and the variable $U$ represents additional, possibly unobserved, causal factors. What specifically makes this correspond to an RDD is that we stipulate that $Z = I(X > c)$, for cutoff $c$. We assume $c = 0$ without loss of generality. - -The following figure depicts a causal diagram representing the assumed causal relationships between these variables. Two key features of this diagram are one, that $X$ blocks the impact of $U$ on $Z$: in other words, $X$ satisfies the back-door criterion for learning causal effects of $Z$ on $Y$. And two, $X$ and $U$ are not descendants of $Z$. - -```{r cdag, echo=FALSE, fig.cap="A causal directed acyclic graph representing the general structure of a regression discontinuity design problem", fig.align="center", out.width = '40%'} -knitr::include_graphics("RDD_DAG.png") -``` - -Using this causal diagram, we may express $Y$ as some function of its graph parents, the random variables $(X,Z,U)$: $$Y = F(X,Z,U).$$ In principle, we may obtain draws of $Y$ by first drawing $(X,Z,U)$ according to their joint distribution and then applying the function $F$. Similarly, we may relate this formulation to the potential outcomes framework straightforwardly: -\begin{equation} -\begin{split} -Y^1 &= F(X,1,U),\\ -Y^0 &= F(X,0,U). 
-\end{split} -\end{equation} -Here, draws of $(Y^1, Y^0)$ may be obtained (in principle) by drawing $(X,Z,U)$ from their joint distribution and using only the $(X,U)$ elements as arguments in the above two equations, "discarding" the drawn value of $Z$. Note that this construction implies the _consistency_ condition: $Y = Y^1 Z + Y^0 ( 1 - Z)$. Likewise, this construction implies the _no interference_ condition because each $Y_i$ is considered to be produced with arguments ($X_i, Z_i, U_i)$ and not those from other units $j$; in particular, in constructing $Y_i$, $F$ does not take $Z_j$ for $j \neq i$ as an argument. - -Next, we define the following conditional expectations -\begin{equation} -\begin{split} -\mu_1(x) &= E[ F(x, 1, U) \mid X = x] ,\\ -\mu_0(x) &= E[ F(x, 0, U) \mid X = x], -\end{split} -\end{equation} -with which we can define the treatment effect function -$$\tau(x) = \mu_1(x) - \mu_0(x).$$ -Because $X$ satisfies the back-door criterion, $\mu_1$ and $\mu_0$ are estimable from the data, meaning that -\begin{equation} -\begin{split} -\mu_1(x) &= E[ F(x, 1, U) \mid X = x] = E[Y \mid X=x, Z=1],\\ -\mu_0(x) &= E[ F(x, 0, U) \mid X = x] = E[Y \mid X=x, Z=0], -\end{split} -\end{equation} -the right-hand-sides of which can be estimated from sample data, which we supposed to be independent and identically distributed realizations of $(Y_i, X_i, Z_i)$ for $i = 1, \dots, n$. However, because $Z = I(X >0)$ we can in fact only learn $\mu_1(x)$ for $X > 0$ and $\mu_0(x)$ for $X < 0$. In potential outcomes terminology, conditioning on $X$ satisfies ignorability, -$$(Y^1, Y^0) \ind Z \mid X,$$ -but not _strong ignorability_, because overlap is violated. Overlap would require that -$$0 < P(Z = 1 \mid X=x) < 1 \;\;\;\; \forall x,$$ -which is clearly violated by the RDD assumption that $Z = I(X > 0)$. 
Consequently, the overall ATE, -$\bar{\tau} = E(\tau(X)),$ is unidentified, and we must content ourselves with estimating $\tau(0)$, the conditional average effect at the point $x = 0$, which we estimate as the difference between $\mu_1(0) - \mu_0(0)$. This is possible for continuous $X$ so long as one is willing to assume that $\mu_1(x)$ and $\mu_0(x)$ are both suitably smooth functions of $x$: any inferred discontinuity at $x = 0$ must therefore be attributable to treatment effect. - -### Conditional average treatment effects in RDD - -We are concerned with learning not only $\tau(0)$, the "RDD ATE" (e.g. the CATE at $x = 0$), but also RDD CATEs, $\tau(0, \w)$ for some covariate vector $\w$. Incorporating additional covariates in the above framework turns out to be straightforward, simply by defining $W = \varphi(U)$ to be an observable function of the (possibly unobservable) causal factors $U$. We may then define our potential outcome means as -\begin{equation} -\begin{split} -\mu_1(x,\w) &= E[ F(x, 1, U) \mid X = x, W = \w] = E[Y \mid X=x, W=\w, Z=1],\\ -\mu_0(x,\w) &= E[ F(x, 0, U) \mid X = x, W = \w] = E[Y \mid X=x, W = \w, Z=0], -\end{split} -\end{equation} -and our treatment effect function as -$$\tau(x,\w) = \mu_1(x,\w) - \mu_0(x,\w).$$ We consider our data to be independent and identically distributed realizations $(Y_i, X_i, Z_i, W_i)$ for $i = 1, \dots, n$. Furthermore, we must assume that $\mu_1(x,\w)$ and $\mu_0(x,\w)$ are suitably smooth functions of $x$, {\em for every} $\w$; in other words, for each value of $\w$ the usual continuity-based identification assumptions must hold. - -With this framework and notation established, CATE estimation in RDDs boils down to estimation of condition expectation functions $E[Y \mid X=x, W=\w, Z=z]$, for which we turn to BART models. 
- -## The BARDDT Model - -We propose a BART model where the trees are allowed to split on $(x,\w)$ but where each leaf node parameter is a vector of regression coefficients tailored to the RDD context (rather than a scalar constant as in default BART). In one sense, such a model can be seen as implying distinct RDD ATE regressions for each subgroup determined by a given tree; however, this intuition is only heuristic, as the entire model is fit jointly as an ensemble of such trees. Instead, we motivate this model as a way to estimate the necessary conditional expectations via a parametrization where the conditional treatment effect function can be explicitly regularized, as follows. - -Let $\psi$ denote the following basis vector: -\begin{equation} -\psi(x,z) = \begin{bmatrix} -1 & z x & (1-z) x & z -\end{bmatrix}. -\end{equation} -To generalize the original BART model, we define $g_j(x, \w, z)$ as a piecewise linear function as follows. Let $b_j(x, \w)$ denote the node in the $j$th tree which contains the point $(x, \w)$; then the prediction function for tree $j$ is defined to be: -\begin{equation} -g_j(x, \w, z) = \psi(x, z) \Gamma_{b_j(x, \w)} -\end{equation} -for a leaf-specific regression vector $\Gamma_{b_j} = (\eta_{b_j}, \lambda_{b_j}, \theta_{b_j}, \Delta_{b_j})^t$. Therefore, letting $n_{b_j}$ denote the number of data points allocated to node $b$ in the $j$th tree and $\Psi_{b_j}$ denote the $n_{b_j} \times 4$ matrix, with rows equal to $\psi(x,z)$ for all $(x_i,z_i) \in b_j$, the model for observations assigned to leaf $b_j$, can be expressed in matrix notation as: -\begin{equation} -\begin{split} -\Y_{b_j} \mid \Gamma_{b_j}, \sigma^2 &\sim \N(\Psi_{b_j} \Gamma_{b_j},\sigma^2)\\ -\Gamma_{b_j} &\sim \N (0, \Sigma_0), -\end{split} \label{eq:leaf.regression} -\end{equation} -where we set $\Sigma_0 = \frac{0.033}{J} \mbox{I}$ as a default (for $x$ vectors standardized to have unit variance in-sample). 
- -This choice of basis entails that the RDD CATE at $\w$, $\tau(0, \w)$, is a sum of the $\Delta_{b_j(0, \w)}$ elements across all trees $j = 1, \dots, J$: -\begin{equation} -\begin{split} -\tau(0, \w) &= E[Y^1 \mid X=0, W = \w] - E[Y^0 \mid X = 0, W = \w]\\ -& = E[Y \mid X=0, W = \w, Z = 1] - E[Y \mid X = 0, W = \w, Z = 0]\\ -&= \sum_{j = 1}^J g_j(0, \w, 1) - \sum_{j = 1}^J g_j(0, \w, 0)\\ -&= \sum_{j = 1}^J \psi(0, 1) \Gamma_{b_j(0, \w)} - \sum_{j = 1}^J \psi(0, 0) \Gamma_{b_j(0, \w)} \\ -& = \sum_{j = 1}^J \Bigl( \psi(0, 1) - \psi(0, 0) \Bigr) \Gamma_{b_j(0, \w)} \\ -& = \sum_{j = 1}^J \Bigl( (1,0,0,1) - (1,0,0,0) \Bigr) \Gamma_{b_j(0, \w)} \\ -&= \sum_{j=1}^J \Delta_{b_j(0, \w)}. -\end{split} -\end{equation} -As a result, the priors on the $\Delta$ coefficients directly regularize the treatment effect. We set the tree and error variance priors as in the original BART model. - -The following figures provide a graphical depiction of how the BARDDT model fits a response surface and thereby estimates CATEs for distinct values of $\w$. For simplicity only two trees are used in the illustration, while in practice dozens or hundreds of trees may be used (in our simulations and empirical example, we use 150 trees). - -```{r trees1, echo=FALSE, fig.cap="Two regression trees with splits in x and a single scalar w. Node images depict the g(x,w,z) function (in x) defined by that node's coefficients. The vertical gap between the two line segments in a node that contain x=0 is that node's contribution to the CATE at X = 0. Note that only such nodes contribute for CATE prediction at x=0", fig.align="center", out.width = '70%'} -knitr::include_graphics("trees1.png") -``` - -```{r trees2, echo=FALSE, fig.cap="The two top figures show the same two regression trees as in the preceding figure, now represented as a partition of the x-w plane. Labels in each partition correspond to the leaf nodes depicted in the previous picture. 
The bottom figure shows the partition of the x-w plane implied by the sum of the two trees; the red dashed line marks point W=w* and the combination of nodes that include this point", fig.align="center", out.width = '70%'} -knitr::include_graphics("trees2.png") -``` - -```{r trees3, echo=FALSE, fig.cap="Left: The function fit at W = w* for the two trees shown in the previous two figures, shown superimposed. Right: The aggregated fit achieved by summing the contributes of two regression tree fits shown at left. The magnitude of the discontinuity at x = 0 (located at the dashed gray vertical line) represents the treatment effect at that point. Different values of w will produce distinct fits; for the two trees shown, there can be three distinct fits based on the value of w.", fig.align="center", out.width = '70%'} -knitr::include_graphics("trees3.png") -``` - -An interesting property of BARDDT can be seen in this small illustration --- by letting the regression trees split on the running variable, there is no need to separately define a 'bandwidth' as is used in the polynomial approach to RDD. Instead, the regression trees automatically determine (in the course of posterior sampling) when to 'prune' away regions away from the cutoff value. There are two notable features of this approach. One, different trees in the ensemble are effectively using different local bandwidths and these fits are then blended together. For example, in the bottom panel of the second figure, we obtain one bandwidth for the region $d+i$, and a different one for regions $a+g$ and $d+g$. Two, for cells in the tree partition that do not span the cutoff, the regression within that partition contains no causal contrasts --- all observations either have $Z = 1$ or $Z = 0$. 
For those cells, the treatment effect coefficient is ill-posed and in those cases the posterior sampling is effectively a draw from the prior; however, such draws correspond to points where the treatment effect is unidentified and none of these draws contribute to the estimation of $\tau(0, \w)$ --- for example, only nodes $a+g$, $d+g$, and $d+i$ provide any contribution. This implies that draws of $\Delta$ corresponding to nodes not predicting at $X=0$ will always be draws from the prior, which has some intuitive appeal. - -## Demo - -In this section, we provide code for implementing our model in `stochtree` on a popular RDD dataset. -First, let us load `stochtree` and all the necessary libraries for our posterior analysis. - -```{r} -## Load libraries -library(stochtree) -library(rpart) -library(rpart.plot) -library(xtable) -library(foreach) -library(doParallel) -``` - -### Dataset - -The data comes from @lindo2010ability, who analyze data on college students enrolled in a large Canadian university in order to evaluate the effectiveness of an academic probation policy. Students who present a grade point average (GPA) lower than a certain threshold at the end of each term are placed on academic probation and must improve their GPA in the subsequent term or else face suspension. We are interested in how being put on probation or not, $Z$, affects students' GPA, $Y$, at the end of the current term. The running variable, $X$, is the negative distance between a student's previous-term GPA and the probation threshold, so that students placed on probation ($Z = 1$) have a positive score and the cutoff is 0. 
Potential moderators, $W$, are: - -* gender (`male`), -* age upon entering university (`age_at_entry`) -* a dummy for being born in North America (`bpl_north_america`), -* the number of credits taken in the first year (`totcredits_year1`) -* an indicator designating each of three campuses (`loc_campus` 1, 2 and 3), and -* high school GPA as a quantile w.r.t the university's incoming class (`hsgrade_pct`). - -```{r} -## Load and organize data -data <- read.csv("https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv") -y <- data$nextGPA -x <- data$X -x <- x/sd(x) ## we always standardize X -w <- data[,4:11] -### Must define categorical features as ordered/unordered factors -w$totcredits_year1 <- factor(w$totcredits_year1,ordered=TRUE) -w$male <- factor(w$male,ordered=FALSE) -w$bpl_north_america <- factor(w$bpl_north_america,ordered=FALSE) -w$loc_campus1 <- factor(w$loc_campus1,ordered=FALSE) -w$loc_campus2 <- factor(w$loc_campus2,ordered=FALSE) -w$loc_campus3 <- factor(w$loc_campus3,ordered=FALSE) -c <- 0 -n <- nrow(data) -z <- as.numeric(x>c) -h <- 0.1 ## window for prediction sample -test <- -h < x & x < h -ntest <- sum(test) -``` - -### Target estimand - -Generically, our estimand is the CATE function at $x = 0$; i.e. $\tau(0, \w)$. The key practical question is which values of $\w$ to consider. Some values of $\w$ will not be well-represented near $x=0$ and so no estimation technique will be able to estimate those points effectively. As such, to focus on feasible points --- which will lead to interesting comparisons between methods --- we recommend restricting the evaluation points to the observed $\w_i$ such that $|x_i| \leq \delta$, for some $\delta > 0$. In our example, we use $\delta = 0.1$ for a standardized $x$ variable. Therefore, our estimand of interest is a vector of treatment effects: -\begin{equation} -\tau(0, \w_i) \;\;\; \forall i \;\mbox{ such that }\; |x_i| \leq \delta. 
-\end{equation} - -### Implementing BARDDT - -In order to implement our model, we write the Psi vector, as defined before: `Psi <- cbind(z*x,(1-z)*x, z,rep(1,n))`. The training matrix for the model is `as.matrix(cbind(x,w))`, which we feed into the `stochtree::bart` function via the `X_train` parameter. The basis vector `Psi` is fed into the function via the `leaf_basis_train` parameter. The list object `barddt.mean.parmlist` defines options for the mean forest (a different list can be defined for a variance forest in the case of heteroscedastic BART, which we do not consider here). Importantly, in this list we define parameter `sigma2_leaf_init = diag(rep(0.1/150,4))`, which sets $\Sigma_0$ as described above. Now, we can fit the model, which is saved in object `barddt.fit`. - -Once the model is fit, we need 3 elements to obtain the CATE predictions: the basis vectors at the cutoff for $z=1$ and $z=0$, the test matrix $[X \quad W]$ at the cutoff, and the testing sample. We define the prediction basis vectors $\psi_1 = [1 \quad 0 \quad 0 \quad 1]$ and $\psi_0 = [1 \quad 0 \quad 0 \quad 0]$, which correspond to $\psi$ at $(x=0,z=1)$, and $(x=0,z=0)$, respectively. These vectors are written into R as `Psi1 <- cbind(rep(1,n), rep(c,n), rep(0,n), rep(1,n))` and `Psi0 <- cbind(rep(1,n), rep(0,n), rep(c,n), rep(0,n))`. Then, we write the test matrix at $(x=0,\w)$ as `xmat_test <- as.matrix(cbind(rep(0,n),w)`. Finally, we must define the testing window. As discussed previously, our window is set such that $|x| \leq 0.1$, which can be set in R as `test <- -0.1 < x & x <0.1`. - -Once all of these elements are set, we can obtain the outcome predictions at the cutoff by running `predict(barddt.fit, xmat_test, Psi1)` (resp. `predict(barddt.fit, xmat_test, Psi0)`). Each of these calls returns a list, from which we can extract element `y_hat` to obtain the posterior distribution for the outcome. 
In the code below, the treated and control outcome predictions are saved in the matrix objects `pred1` and `pred0`, respectively. Now, we can obtain draws from the CATE posterior by simply subtracting these matrices. The function below outlines how to perform each of these steps in R. - -```{r} -fit.barddt <- function(y,x,w,z,test,c) -{ - ## Lists of parameters for the Stochtree BART function - barddt.global.parmlist <- list(standardize=T,sample_sigma_global=TRUE,sigma2_global_init=0.1) - barddt.mean.parmlist <- list(num_trees=50, min_samples_leaf=20, alpha=0.95, beta=2, - max_depth=20, sample_sigma2_leaf=FALSE, sigma2_leaf_init = diag(rep(0.1/150,4))) - ## Set basis vector for leaf regressions - Psi <- cbind(rep(1,n),z*x,(1-z)*x,z) - ## Model fit - barddt.fit = stochtree::bart(X_train= as.matrix(cbind(x,w)), y_train=y, - leaf_basis_train = Psi, mean_forest_params=barddt.mean.parmlist, - general_params=barddt.global.parmlist, - num_mcmc=1000,num_gfr=30) - ## Define basis vectors and test matrix for outcome predictions at X=c - Psi1 <- cbind(rep(1,n), rep(c,n), rep(0,n), rep(1,n)) - Psi0 <- cbind(rep(1,n), rep(0,n), rep(c,n), rep(0,n)) - Psi1 <- Psi1[test,] - Psi0 <- Psi0[test,] - xmat_test <- as.matrix(cbind(rep(0,n),w)[test,]) - ## Obtain outcome predictions - pred1 <- predict(barddt.fit,xmat_test,Psi1)$y_hat - pred0 <- predict(barddt.fit,xmat_test,Psi0)$y_hat - ## Obtain CATE posterior - out <- pred1-pred0 - return(out) -} -``` - -Now, we proceed to fit the BARDDT model. The procedure is exactly the same as described in the simulation section. 
- -```{r empiricalPosterior, cache=TRUE,cache.lazy=FALSE} -## We will sample multiple chains sequentially -num_chains <- 20 -num_gfr <- 2 -num_burnin <- 0 -num_mcmc <- 500 -bart_models <- list() -## Define basis functions for training and testing -B <- cbind(z*x,(1-z)*x, z,rep(1,n)) -B1 <- cbind(rep(c,n), rep(0,n), rep(1,n), rep(1,n)) -B0 <- cbind(rep(0,n), rep(c,n), rep(0,n), rep(1,n)) -B1 <- B1[test,] -B0 <- B0[test,] -B_test <- rbind(B1,B0) -xmat_test <- cbind(x=rep(0,n),w)[test,] -xmat_test <- rbind(xmat_test,xmat_test) -### We combine the basis for Z=1 and Z=0 to feed it to the BART call and get the Y(z) predictions instantaneously -### Then we separate the posterior matrix between each Z and calculate the CATE prediction -## Sampling trees in parallel -ncores <- 5 -cl <- makeCluster(ncores) -registerDoParallel(cl) - -start_time <- Sys.time() -bart_model_outputs <- foreach (i = 1:num_chains) %dopar% { - random_seed <- i - ## Lists to define BARDDT parameters - barddt.global.parmlist <- list(standardize=T,sample_sigma_global=TRUE,sigma2_global_init=0.1) - barddt.mean.parmlist <- list(num_trees=50, min_samples_leaf=20, alpha=0.95, beta=2, - max_depth=20, sample_sigma2_leaf=FALSE, sigma2_leaf_init = diag(rep(0.1/50,4))) - bart_model <- stochtree::bart( - X_train = cbind(x,w), leaf_basis_train = B, y_train = y, - X_test = xmat_test, leaf_basis_test = B_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = barddt.global.parmlist, mean_forest_params = barddt.mean.parmlist - ) - bart_model <- bart_model$y_hat_test[1:ntest,]-bart_model$y_hat_test[(ntest+1):(2*ntest),] -} -stopCluster(cl) -## Combine CATE predictions -pred <- do.call("cbind",bart_model_outputs) - -end_time <- Sys.time() - -print(end_time - start_time) -## Save the results -saveRDS(pred, "bart_rdd_posterior.rds") -``` - -We now proceed to analyze the CATE posterior. 
The figure produced below presents a summary of the CATE posterior produced by BARDDT for this application. This picture is produced by fitting a regression tree, using $W$ as the predictors, to the individual posterior mean CATEs: -\begin{equation} -\bar{\tau}_i = \frac{1}{M} \sum_{h = 1}^M \tau^{(h)}(0, \w_i), -\end{equation} -where $h$ indexes each of $M$ total posterior samples. As in our simulation studies, we restrict our posterior analysis to use $\w_i$ values of observations with $|x_i| \leq \delta = 0.1$ (after normalizing $X$ to have standard deviation 1 in-sample). For the @lindo2010ability data, this means that BARDDT was trained on $n = 40,582$ observations, of which 1,602 satisfy $|x_i| \leq 0.1$, which were used to generate the effect moderation tree. - -```{r cart_summary, fig.cap="Regression tree fit to posterior point estimates of individual treatment effects: top number in each box is the average subgroup treatment effect, lower number shows the percentage of the total sample in that subgroup; the tree flags credits in first year, gender, and age at entry as important moderators.", fig.align="center"} -## Fit regression tree -cate <- rpart(y~.,data.frame(y=rowMeans(pred),w[test,]),control = rpart.control(cp=0.015)) -## Define separate colors for left and rightmost nodes -plot.cart <- function(rpart.obj) -{ - rpart.frame <- rpart.obj$frame - left <- which.min(rpart.frame$yval) - right <- which.max(rpart.frame$yval) - nodes <- rep(NA,nrow(rpart.frame)) - for (i in 1:length(nodes)) - { - if (rpart.frame$yval[i]==rpart.frame$yval[right]) nodes[i] <- "gold2" - else if (rpart.frame$yval[i]==rpart.frame$yval[left]) nodes[i] <- "tomato3" - else nodes[i] <- "lightblue3" - } - return(nodes) -} -## Plot regression tree -rpart.plot(cate,main="",box.col=plot.cart(cate)) -``` - -The resulting effect moderation tree indicates that course load (credits attempted) in the academic term leading to their probation is a strong moderator. 
Contextually, this result is plausible, both because course load could relate to latent character attributes that influence a student's responsiveness to sanctions and also because it could predict course load in the current term, which would in turn have implications for the GPA (i.e. it is harder to get a high GPA while taking more credit hours). The tree also suggests that effects differ by campus, and age and gender of the student. These findings are all prima facie plausible as well. - -To gauge how strong these findings are statistically, we can zoom in on isolated subgroups and compare the posteriors of their subgroup average treatment effects. This approach is valid because in fitting the effect moderation tree to the posterior mean CATEs we in no way altered the posterior itself; the effect moderation tree is a posterior summary tool and not any additional inferential approach; the posterior is obtained once and can be explored freely using a variety of techniques without vitiating its statistical validity. Investigating the most extreme differences is a good place to start: consider the two groups of students at opposite ends of the treatment effect range discovered by the effect moderation tree: - -* **Group A** a male student that entered college older than 19 and attempted more than 4.8 credits in the first year (leftmost leaf node, colored red, comprising 128 individuals) -* **Group B** a student of any gender who entered college younger than 19 and attempted between 4.3 and 4.8 credits in the first year (rightmost leaf node, colored gold, comprising 108 individuals). 
- -Subgroup CATEs are obtained by aggregating CATEs across the observed $\w_i$ values for individuals in each group; this can be done for individual posterior samples, yielding a posterior distribution over the subgroup CATE: -\begin{equation} -\bar{\tau}_A^{(h)} = \frac{1}{n_A} \sum_{i \in A} \tau^{(h)}(0, \w_i), -\end{equation} -where $h$ indexes a posterior draw and $n_A$ denotes the number of individuals in group A. - -The code below produces a contour plot for a bivariate kernel density estimate of the joint CATE posterior distribution for subgroups A and B. The contour lines are nearly all above the $45^{\circ}$ line, indicating that the preponderance of posterior probability falls in the region where the treatment effect for Group B is greater than that of Group A, meaning that the differences in the subgroup treatment effects flagged by the effect moderation tree persist even after accounting for estimation uncertainty in the underlying CATE function. - -```{r kde, fig.cap="Kernel density estimates for the joint CATE posterior between male students who entered college older than 19 and attempted more than 4.8 credits in the first year (leftmost leaf node, red) and students who entered college younger than 19 and attempted between 4.3 and 4.8 credits in the first year (rightmost leaf node, gold)", fig.align="center"} -## Define function to produce KD estimates of the joint distribution of two subgroups -cate.kde <- function(rpart.obj,pred) -{ - rpart.frame <- rpart.obj$frame - left <- rpart.obj$where==which.min(rpart.frame$yval) - right <- rpart.obj$where==which.max(rpart.frame$yval) - ## Calculate CATE posterior for groups A and B - cate.a <- do.call("cbind",by(pred,left, colMeans)) - cate.b <- do.call("cbind",by(pred,right, colMeans)) - cate.a <- cate.a[,2] - cate.b <- cate.b[,2] - ## Estimate kernel density - denshat <- MASS::kde2d(cate.a, cate.b, n=200) - return(denshat) -} -contour(cate.kde(cate,pred),bty='n',xlab="Group A",ylab="Group B") 
-abline(a=0,b=1) -``` - -As always, CATEs that vary with observable factors do not necessarily represent a _causal_ moderating relationship. Here, if the treatment effect of academic probation is seen to vary with the number of credits, that does not imply that this association is causal: prescribing students to take a certain number of credits will not necessarily lead to a more effective probation policy, it may simply be that the type of student to naturally enroll for fewer credit hours is more likely to be responsive to academic probation. An entirely distinct set of causal assumptions are required to interpret the CATE variations themselves as causal. All the same, uncovering these patterns of treatment effect variability are crucial to suggesting causal mechanism to be investigated in future studies. - -# References - - diff --git a/vignettes/R/RDD/trees1.png b/vignettes/R/RDD/trees1.png deleted file mode 100644 index 0a5bc3acf3e9f2f82ab9fa76cc11949c673195d0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40369 zcmZsD1z1#F*ETWW(2XDhLw5=i(w%~Uba#W&(v5VNNQ=@bAxaCZIxghH{qYVlY*)eGp`iD|!p`s+~aJm%|}u{z}t zo|ZzqC%AVn5rW3rN5wBubs*%fMB;B?*yQD<@zRR#8yVXP+tvCPT$&8~VyzK#}J3`-i0|*#0%=$Aywx!$;GP3+`xUtLz!t zarkd|o93#Py_pbwLz|*=AJ+AJ&EUlo5g4{!9pMi&tMb9yS%(fS_Jd`s++8PKk|h;{ z5!+p(K4}$hPiCecwYo2U36EUg;y&goc`{&%t)qa)P1|QfTP4SLzK~??#XXJMNx^C_ z9*q5Bj!BIKlkj<(v4k|{BU7{|CA6)_dt~lBSHyDjBU%Zunu4fzZYbskjXL&_hJQ$2UVs!kzffvS!#zQH*_w~p7b%Bu+DZ>+j#T<*#j>u_OFyxvR z+VYml$_UKhGZX| zl$Qs8tD8NwuyAy-c5=OY$h!q@HD#lr?W(P;BxvU3z;0shWNN|g>EH~%1VPAC5PWp7 za5aH>I@mk92zm-r|8<5S_zXYHK@I!s6jwW8YHejzn55HF3)p>jPIgXe5p);~CiK+Y zQcz7w=AX;Kzl5o+U0t08IXFB#JlH+%u{%Ar;@}bx5a8hC=HTXL181uZgLXo2xK2HT*&U{rS6{7M?c0p5*BAPq#n^IpE)L zaItf8{C96~sSy0ApsJ0hg}siHjRP1S@C*?iE>}b zoBzG?v5Uo1Nhb&JNLP_xGxN{Izkm79g+d(gp8qxye-HCtN5MFYpbK&QH)kT~vy0SV z-H0K`ONncEBJR9In^T@V@5#ZWI`l-NOUMYpeDz7S(78#UF5$8{>;((arly8kM*oC# z-(dWku}-(06QKiu$rX_DjebKVN4_4w@lo`4mOUFETXcjbrd 
zW6bRm#bWt%xd6x?KW%vsZvHxbR${bxgg<^5kqC7?Y5wOReyO?zi@=+`Aj3-aXR|XAc-dMU{WP=x z81OmqfbF`*f|uF~f81oB4ly(QtY${X=8wb9IN)&8>vCfT@&`eloiZ>yHO|!pCWtmx)E{k8zrZkCWbR@9ZCYAz%YOPWnmzduwteef_Zq4!xOw zp3Q*f{6t>mJ`0QV|K9tM)9EdPo#Y>fvuVI1U6lLKfAttF+kgi6BQ?Ei?*G{vCM4Ic z6AhOd?mrI;BRO{suYRztmZ4q5LVDo76OTo9|C8Hi0|s8(iG7(@+8Dp)EFu9cRxYHK zC0gU#f&P~(l-y?Bn8oU^V|TtZsATdvvG>GLHf$ff$Nn{7{9y<@Y+A1r*wgN+Qxk_k zlxPFU=3*L7T5qnLG6da9243)Sh^rXW9*kP!d+{?u>kgk|7LUU$&mdO}8DHw=`0IY~J8r*$^Q-0i zf{023+|SZK>t!5A$t{lLN#dQDZ7g+$7f=v<4{+HS>J337SRHz>m;Ca9N1rUEFYPPE zG;EJXZbZFW%b3;S9MM6pCYKH3)wkxoi9BDwWv#kcHC!@<{-CNZ4elmf~}Kx`}$-x33f){DjupxioQ>yP(<-L1 zr#OCnCqGrDgZw$-NczDStKn?nK@(l;(Tnk&)!zq>6EQuUYqdI@Ed4QtO0HNS5>or( zxQb}XgYS(r&ai<>T+`F@?MkgLxuiBTHLsG{^zJ(=z2S)1=QaO?JJa~2;Am$?9Tk%V z=GkVdEa)s2hH0}w;CFN7kwWBS_FI~1Q-N@b zH1b&GXl1%@70~XS%zHa68bjHRwsR91T# za*}UR`lx0E@6LaC(Di7PPrM(Qt$(yYcKj}8AQbH)<5zNQbApV`=AM@SEUT(bJl#>L4Ho_FLAM}`b*H%rIv+ANfs`>uJ@kTa4t2kCi-TS(~F=t7~(FK8VmI&RVn^&zHvNeZl9n zZ|TSIE72O^HZr>zyZ7GawV?BrML?1({6e;!eR($QpBjcm#+*Y`R**=iNUT$B8Y*<~ ziAKSAbFRsyjY-rnAVTiG=f3ewtyP@Rk1m3-H;)P;bn*uFn~K$5MP4r4T;xby&e(=-2)}=7AiMNYK*DGOtUxm=U}A@yT08_8@_!t@rj1Wbl*l3Wzb|a} zTXX{#e22wTNur{&eYa*#P5hr01+1G~J9tlJ{c%`dJU}9fCG&YG?XQ^xBYT6NJF>h` zZ#DQQR3QRur!hZ6TkhB311Po6fXL3iIqYs#{^!}ekjBPByUa)wy8pTNr`3g$+I2q| zLT}dd-&)i574mI-`TI|^O&Z*QtAf#1A{I@#wZ4=!uph&QxiUS3dh+guV%#Q9W=TDy zSzKiQ)e2{}z*4naQfc~n&xo@iO0fNYj#kFNwJ%PsDM2V0nehbzI=T8FBPB~m5et;) zuTR7_{YseuTu8WDZ)~`_BW1mk+d$AyX{#}AN9sXC#m()GCN1bYx!7y(TsBr7ZqO%#TY}Kh4g&e{tY;{ibR&R`^`i`H49}wd#K>ikKjv zD0JO~Skm(65Zy?vl}c}pXe&IX4zaD?DEwTG;wW))WGr1VsR{V+nhJP+c2n*~dLfP= z(RQ%bZ?V)7YH4oeMKMw%_GRq7m&@j8LHOF|6_nq0ITIw;#^jUP>pIka6S6>#)=M?Z z!A!dCsq)(0%;$!T>P1T2PV+*=n&p`q<+_iJ?|1(OL%_u35Zmfrj*OL=1+--gdl$Oz z&Mw!XmR-vWpJX%$W>fv5HU}wsz6-qdy>!*eIO;pB_-II z$sOVI=iS@ld;F{|rXESpKU)mEWxhU;p_VU&rbQrLcuz`5a`m1Jp^KY(&HjH+K!tLy zRwK#@b&){?UBAKHwx^DxIpjb%cQMK^u~}JQjYjPb!N2!KK=XW#FxdHi zBC{+CrZx@WiN)%tgqBx`4vI517NK|_eY`QMmj7w#A`=qSOG!EEb>_rEO2z|^ib%ry 
zVDTCSixu)nDVa%e87-ie{o7>EFa+YX!lIA&1~>O2jRWT2x!KKBn}wSXXMbJbRI{r@ z#$(m45`%98ZwkmSaBK*NsUIAA5^v#+AiK6op<|Rh>#AJE$HVb}(}93WsBuwd?=vJ4 z{d+o+uq56YZY}((a0F_L#})cBT3{(9O0V)r5lYg&1#>T7q~tRfQ?v5dP%la&-Bs25 zQbLGXzNVN%Yc;}Dr0XDSdliwh zLd-ina_stzFtEUq;Mljyq;vK3$I8LH_nth+zUF)LAGrOmeaTXV!rp&dyGQ^p1W>UtO3gNckdWlIb{yyJ14)6Raj#YB3~GWrq_g%p6QOMEL^ zuis%(is;vv_fkto5+;b=oDJJPxcNnW0=_~>ga;g#-x~(BMc~m)1ZiPg-PE{Hi zq0xEFJqg$M`056b4NVHeRC~M8=L&;YfOn`BDUlXGE=?W#r0>1^mdhxO;qOVOjSDcM z(c7J_mb`C2ojdOYC{#o-qA_s{9r6Cq``!94v`+Ky@rF-WRw7iDX60pllEk=QoI=G& z#2ErIdJ@`mKF?bkVeiD^cDIvKIzje%&S|ArR+j5g8hA2=CYW?>V> z>axlmyiW7_8A4=Chg*}SYb_VwlY(#I^c{aTs$|6nIm2RAgTf3EQE*ugAw{QK2Il>9 zhf0OtZEJ$akJam7HQ}DsP>4`ASquTAc2(|yiOuJVmpt~= zQ~BX%rv2AAgTZ40cz^tp zm2M`s+XKCh?>6a~G|RLXh5V@gPB4Mm(98W!oC7e}{ebU~_uFuhN1bSb9FJ@|glzoTl|my9xI9hhdnc`Sh1TacdqIP_c0U_AWdR6~rM^<$ABF zwaG<%w57}X(l~N5u>_tSy*bBfjRae>SO+Ve)nTEaYNh`{5=ZQ=i|gjMMHPavBF=e7QxFs5cxcP#g75*zY6nl6+zlC(*Nal5|kIb`Dx{$2(h3=>HC%dw2n8`v3z?ijm(ri6wtfqOkk{ zal`HOY1<89;8WZ#6sn`VP`*rDTgp+E1lb%B-@>PBeKIOB+x*G5B}jrUAPh?Hsl;Ju zX^VB0<1Cf$sX5I=ofiYzM8m|bc}OorxlnohdS;A73=<%Phy$sr#`2ezVpiTaS0|;n zAHKNnfIl2B@b@P53~B?BfrAsTn|9JF$CvmG!PM%z^Q}^3M3<_f+dMyTq0zLb^7n1V zq~`rD9M&iWEcyuhQHYZ|+VlLc1;BMs{km^`Vz~4hodnH-Ko`HqTcMV_XlUu%t1& zG(BAl;!^|^97QNSQ4-+CIl zH(U-)mn`H?H_{k@11aCXHe)Cyu3#-AVFYTIL#kA{>sX=R39^i5!$Pi>)BsSLtT*$lP9ue z&8Qn!&EMZCmV4w6)ysXGTdaF`O9`f; zB{iz&KX`8|Un(IXR*WY?zWKGR0@R;R9p2_n%u;*rzXYePf^ctgR$O>u5|_Ra=w7zQ z6)1#d&^VOPY9+)=1Kb1c@OGRO**XVYecWP9-P`=9W`Xk7(q(dQ>|Q^7S*Vc0%*=hq z$KhW>euvgqIm9E3>8^yxHiQ@|7a&a!L?`yjmh+NGik}neDw~oLelliv4`>~ElmOK< z(GcQ%gcLCw80$2U&Xvl>m|{)p<>!iIKBVs1`oQtmDvAJ7 zX@n4m1-V+1RcpYJJOa`LlD0o(eD&LCCV=fee{P~t(*~5n0_-uwa1kTqAJ%K6ctZtP z{kC8!ct#@Jnwa8aAl2(3$t2DU*XjBES)lVr3y77>YU#0J%XTn|3ZYpKkbD)yq?>=O zp7?+a2n<)vnk$L%Pz~Y=V7Aa4pu2_3$Vk>!U3Ib{{OIf=*3Oy<|DkR(h>q5fIZh!7_m*)^Q+1)Ae62FyM?Ft=wjLuZ>hI9C214O@FrbYJ60*x$#|4F{{lXk``pL(()lL%N(gsF&~r)JteW5Ws;cQA6#nG|KB z4%F>>w6tWBqTVDn@q8LGf^X@Re*Fh0MbxuD@v=JwaQ*e~b#m^C{&r%SX4L2`e&@bDV~+3742QMq 
zR*NeaX`l{_PZdqN0=ZYM+1=sn=NG3Lmn0I9#^aVcZf^uvX(x8Zt0x@4iid#QgE-S^^c zz2aUqR>KwaI(iL`Y$TnWO=*AypVYtHrIURf@_2|#?-7s=zJSE?E(gJK0oMi_Z^K!X z>+jw)=7@!3lNSP3HCAV%E~|-}WJ%I&JQlr2A$$MPghcz@cnL)fB_T0_k(c)=JBE%QyiZ~`T(kdnNfu#9j?Zy>IWMu_*(@`nBgv1vro(pj6GC}82E+w(KnDHF zM7He_17%|FFfFZ}ZR9k-JS_j#>x_93AzGUYt*z+wLAfkSw8lh;*1DeII?|sd&+^qJ ztXI4a*S+3omMPSrE&7!JEe0f!fT4t5M@+*U_#!c-4agPRClFVqKxgIZSXtsC$StC? zG{1ge0>p>0#wTVn=!Zt*U)0RVA7KWPe`Bz~cxo8(&+5{bMS$<@riz&zmMRL66svs) zt~}|&Mt+)=)(-tc{&C3wIPdB1=aZ#*he~WNQHLe<)tDNw6e`vkbb*<$NizA7xo_a# z1v{q*5HrCS?0R>lCA39Lvjcj>`HO@vDJOm)Br3vL=0jR%fHQ&Yyh0?6(Qp-s+|;gt z;{MR1dzLVLA{)-{Z#_ymqOupfxxs6TP{t zHI@$t4DA!#P&phH)xw}kW6^pDoCcb!^W!9Li@rC$07N7%_+2Q&WpMxN!<@-Xv)+>M zg^H*XrcmP6w}RV$wdwD!a^VM@Rou=GIR@kv_wp4`>>|a0jP}ZQvINNlRpdS33&|iE zDAwE06h1rN8vEew0pDZMWPA@_rU|%MHm#-Vtpaw*g}=J=92ZOj-ZP?}?tAlWZ|y!453EWeCvg~w0D2{pYbzUj_t6EjW*OthsM~2k$TR?;nGUG^7))J^=qH)N%sq4R}h# z7<-neKUm<$nqC0Wri=vPi(Va#D;SgVJLleG#@kGK9^x(2Dp#J zq+X<-u>G8XVRgFN3~o!ka@(AJTPpN(+Dz+i57rc(@e$|tG`H2ip%B>XJc!mxi=~0$ zLTZ6XD3DpALeN8W9<2S%5i|^>I1qGhTz5AJRD`GiN~tMnc5ih0QeS>x7Pt`kFvsWM zdDa7uhrqs}{cPaz@^aV7U;Pegl}Sw+u$*_+2220m-*6lrPyhw$ld!p$F?;HK9GyzG z(9@qgI`Z=J37DjF4^FPqS}&yqmL*k3(ddlM4vfr#@ zEG`UlZTDaRYQQO}9GhM};pG+ZvJ|!pJ$$Z?KX0)>1f;OdYH|X+rOhvNenkGW_dwEK zBn7T1C6{SPo6$;n!+c_n-?_YR2Q4POG08}0xY@Unyo2=@PH-(Hd2!C5K?Z-;b zW0J+v%9dFJ6cw#m9MMZv)wCLm{*pH)o9>6NCDFvlRBPq!@T zCD?b=E>Zk5vk{>{STxQ1X!Z*`|5eLiPluq45lXC=aA^9gdw&Ul)wls`NTI3P5nkW4 zrI6u!%IES&&(ouCk0g07wn{5CAb66u4q^EFjDZXOHwGtu0Lv6KJmvsQ2;V!*w+ zPJ@ikF?oIPh2s2suUBfX6m03VrA8xCVT*G_`kM*&5sEmm8ut2|&K)-#JwcAS8EZ3C}M#U<$`&_~8wxuCCBSuhb(NCPgX%YnBy(9=< zZ|GyWDmn$Z4IlF0)K{dHNWJLsW~IQPla*?q z?tAl|2$OEz8=v#d*V%hj1zbyh@TD6egK*^xltc67uIGJHLbE^S{OJ&t%*7=qK^ARq zX4DPJ{n3Vu!zg|W!-HM7PR?fU2X-?M+D!59Dkd{hDZjW+#Jd0dqgeGKLme+XAOor} zI-ZI^sGI-QhJH7E{DD%9bJBwE$=3rQMQN3@N9n|ZDujFMrDZy`GP(e}Am74HX2|xG z5IO%w zzPvuC&S#)=kXi*W)+umpP8egdT=_Awk?7PynP6w3BS7|jV^S)gUJa8WVTBN#;(6@N8=#R2KRb$$my^%2 zI$sz$5<6}0g4Kzh9+vD&+O@(M_!!lX|2-@X7UoGrwLu`)E)qiR~$$-5h_Cv 
zl0~na`3a<|@`)(U&7jE7)*Z}M=~Qw#u)4MZ)F)Ljw3s|xr-jHZc)HiZs4XyHwWJz; zcvc+y0ceFmRtT-5y?e876@#t{s1s)2xs|$ZO~|`l;`(vo1-$=s%+jFl{Evk)kC!a$`2hm7)28EaaGm-m;`K_xY` z)ngz|_DkXj6MjOp7UJkA7^dy6!r8%E3W7;P&U0eJ z+^>ORyjDYZdA+PX1WeaRc0Hq2nCheo%sZ|Z#mO8k`71w{YN@Q*o2Pn#q5}m*x*OZ>ooOMn*U=yWL70vH{ro{z=~xV&2Z z=LpYRE>e&fIocTV2b#(VB)!P7C!7z={@VWy1OX~2N_{5z-+#835_~ZQ*+}6F(ya?T z8X3mg>Z3i;=6M-j2f(z1zr16EE_T|A_=hWbck%NM48OU=IwS6EfmH73E75V?>J3y;U2c?b8U8 z+nZ}rx4BLAt<#H8LZw2#Ny317>pparu)e@_84+X|3hIDfwem`*(4S4OA>;{+(g?ctySF|oU^GK1~C+~d*FA>$v5Q&M>!ms5i|1Dwl9vcw+*G8(l%YOLyTFV}sG znTE7(0Oi75+5k!8DZoU<$2!LI7PKbycf8tGl^8+{!_tjvb*Q(YBNeBUV~N0arA?0~uO>0OlE*5UG zou~KXQR1q83}oC35`{~y9{6IEtFJ|mQW_Tgn%UgkH^>vTi%tCA0uK7tUh0jQ?DI9p z`5D|I+(81%AV(s7#=3M5(U1YOAZeh(BUSQrM z#ayiT@KZMh=Wd(CSt(>ACXkYt<{1znlY3`?VVTl|DZ#J+GJBw8UVGqPWHuenj@(s{ z?`M4%<$ZcThQN4;bL&#eGoZ?+xI^e8fF`0PA$0k=>WxhqS}>LnU%GJDtQ6EX@NFcn z!9d>;D90(W!d~j;1A=(V%?jlW^ekFi2YVeV9sOmAqMV^kbBITd`p(I$Fyh*zN-w=y)Z?5 zd@8z0${5n$9UYgkeId3RGa1OmwODvtmgjz~qc(9YL`WfE_!TeKHeUtFQKt%IkCGuX zMB3H@syEUI6RJs?&q(Nt_63;;kGpZf38z$?b&Ip+d1&fIti?r`(>;LX!o1-e>r`J? za$ZHhx`l%HN!P5Gqno{~IaecN+d&Uj5C;8*4Ta@kZNL8Sz)naGp(PXQvZEP@q8t>; zxTGcMILEJ5hZ|yQtE*(oEIP>96&Y=BO1ECIGSjm3CF*aaC`XJak~S58f}xeJdAzr< zpt`DMC-)+aq&sKN9&SU+zdIQ=;6C{$=&fAcp$A)s87?S+<)&^PGFv4-6 zfz9%+gI{1D0gd)0LVl%bplO4^yev{UuYNo((ReQ!oakmuNccU9>JE;wU(Q?4b^*xs zOXQ#J5!7-}2z~p|l}E`=ZZ?NSXmBUl|7k7anrs0Vda$4)Wr?1K- z&2<2(cdfAi_G^0RM;;s(+wCg--!X11j1%PM&5^udnm$u$DpagHv>>t7Q`@N@6ndpI zk#JZoyS=rhe)EYQ@7a)O+Adj3%qMywvhP%IIm*Qb%ZtP1qeLRSHU<1@BM&1s*&gyS zaPZ43Mk!|l6+^}$kN@f~1(E9Hm? 
zuPfyM&lFx5X(}?T$Y8v73hG9h-YMKx$w{g8_S5tijlGc4ByyrDRlMYlS$qnZVn5&z zclA_caPAe`ye}hGCd8YeRT!DssP*k^z?p%It+es1NPeE|9Kat=VbMzUnD>&kohmC< z%@Ow2bZlHg)u}Om4od!}aIfLc0h9B5|B0XGx*%kde-zfqrrr?SD!OZ-+;4Wwaie){pb!GX^lmKZu4uD ziWS#;jY^Mf_7Q#!01QzbUe@~w05tQt#wS~quA5^;t7KVdAu+(~Mu8CT^k?#xPP};} zS)`Of&GF$#ZGVZXHAlcj1{iz^^l|aGp82f6O=yE=_xdS(r2cnXaq>U=`1-Io-0?d* z1f$TDi3VSE7eAMX>eO8aTqG9lb@&X* zG(UGj068SfwN;>))xBzi7fgBuZGg^({Y;HKsN?v^~=W>8&xUGdMPs5FNP*-$AB(Mhj7r>dUBRzBq$|lGp;3yNo@5A?)c)Tl&~LMsX;eR7HCt~|rK0Jg zv^OId{46yw&}Ra{|G?+$za#>X>M(xGFg{z92qit9CxzGEgB?q=Cvt75NN_$C1qD{A zc6tAgZv}D8vhJs0xlRG}Q*!43Ey5+eG&r!$xjteD?9~J^b8<#%!XbgcIw)|U;+4SF zeV8poIx32c*OOb^baQ<{z!YRP^zxonY%+)c%ue5s!kU9~)GiXnRvHj@W|}=JUq0q_ zWmAd;g~U`EQxqZCRiG-38cTgRW>b*}KEjrwK$UPX_kqM$BJ`pjQ zyu^^&k~p0PQdcjaEmUCi8z$%O5O9)m>kD*A`WG=+bxfMppB&_3SuaN5y#4@iQ9qchJw|2K#22w?D)9)0-77tDX@K?6r6;+)R<~}5WpklN zDx)uy#)`&BWQ<}C)I^Q5pl!Yv3G&6kb zj1kdoh&U=D8c+u5Mk4rZC*<}H2Cb@1KiZeG=Ns3GaO}-`p47!y=CCWwP0~ls;>so&ri#nIJ$obJG4XALy z^opG!TZ#)}wAdub>Z@K?XbgE?c!cIB#CWp5)Hx1X>Le8#46~LA5_{@+!@*oSuJ^NV zujgU!p6o+N#~n(&gP5$Fvxr&$gYCUSshe0^U!r>z4zxJdE5PitSXI@F6My53&Q?!- zvtkN;^({>N1v?jnXyB)DHM88}?7Y_A1_K%iU4VBhU~yZ1PlFN4Lfs)4+I7}=q3#7( zgDY}vp%7ziL6Z*1%v-y(gL|Q*k9!m89xKE3lrx}}MdM z-h3wrHI5GTleQ3N zOQW-eJ<0HZp*oOUN z&5ktG;bQDSg*;&)hVN8}9Jpk=Zu7eBMeTJi*svkg^ry z-^=&bO-y=}3wD`TkL4zoV9MSBipLqlaVRx^Y?+BrUpn^fM{ z&mLSXd+tb_3z0lWfyfV(9@X6wR4R!d&@Wv9x;M9J7xb;O8Q|M&l9ATdBlYm|A?93! zOZ9Sv(MDZ2gVICk*0_3{SZA-LG_#s_tH^3^9DJGXEisiw8QWcxth}|K8A~9DBGk42 zKqNKD-Rw0|^u)v+YC`FK^evB1=Kzfgr&g#Pt;oi4Ocf>V>+r%To+BuY*hg}9$shT? 
zK4mr|FQx(Bx|F~m{V3a3DuHns*Lt@8f~ETb9GXPWlpE0Uw4}=Iuqv^4D2=gFW_Kwh z{j^7Cv+>eC?`H|VFBZfZxF5-xA?}(aMR4#1$P27`^-7_$UjVp!Iu^z(sDVC@gNPa% z3W}|AISJ5Hk?Zru!!ts)2GDo$XURugS&`5@V*|8qeeQK+tb8iHNFtT!dJ*zs^a8m> z87i^V1=w4W=8xH2tm!(ZAz*4PCw>A_e|ZR@6fK;N>PBF6+|tYrWvj-^Ax&Naf~4lz zK(_FM_&CG%>7w;D3u@c|bxO_DpK7ZDwV^+ur;WfMZ8JwQB-eit&9uJreqgfS4a3no zx;WS7$?~ktNUWMQnj>3Oi=={V9W4jeEKi9iYT>gfq#t`gu`jtE;6CXvehPFN?q^5V zqi1G69?CozXS3Y1$`304@G554_MM}lPlXvxdx3nL0pyY!;cfF{@Qhx1b<6rgP?`UD zRj5??W`)fq%u{96s~c);j2wtE3#~<6@2;>PeiXqNCZ`GnNlqz+k^59Z=sqU6fhb$D zWjv1bx2>&=D^Pja^M$CU_^*vKZw+E6`9hpUVM?6eaTsR1=@pPp%CV7$yjdP)3jO7C zWhv&uD-qgNH&REk5+}hH})!VH&14`{(qb%2g74Lerwecf@ z1JZbro-A_i5(3QKMMy99a%_lDq=$Od?s0omM^`5;d_k1KBChm#$Fajrz z*M?IOkxafBlGM1&aUcbk(V&fVHqep(VaGmuGxNuPZPjMMZCw!^;( zzcUmuJzc9z`%%`r{e04NT1MbZkRJ+tFPa@*H5Hu{! zIk!Yh$%`eiZ6uShUVS9LNufo<0||Z?mp#;V8KfbaMT;0QZX55HEJ|}r9NUwOrko2# zb=C>R^cR-%ajso_NtUXHe<Ch)V6u8gF30@=+~?K6uO+aA%@e;_*oN>bpu^*wISK zk9LV>%DU2uW&s^z{8LKAN}P(M$q~QB$UtH06#7ovu0D6SD6w@?WsCt|?7RFh@00J3 zEyc9E*frpq&T4;}%03!fz|5mpmqOOaPIIRqkGU%s@>qAJau_}I4^hQYjQ&ER%<_YC zk&Aj`3E-0_xci-pg4nERIm@>y(v!1A{ad2%3UHekKJ075>PRo;Wg~&L%bE^l3Bcv> zcIpt$!GHonNTRbm#{GSV=^6_~V}m$Qq61aR#K$we6tQ-B(U!l5clyT;ut}l=OjnStKh8ViDj#TxTZV$Y#ojmXk)8 zXrC%KptHUk^1Ht|j=6j_Vfo0D;-xJ%iPP6*VVq&)d{i~u*fiKM4dOQR)Wq3={KLM$ zMJNlaEv~*bD9KndX*7?EC0R#Ja$*Y7wUr)+ z%J&D6k=fFLYjIPuIYAMojXD2%>5>TB;~Q=`2L)+l4n~0Zve8tI`7w4FtA%4165s>j2L|x@3v0pl4s)3 z+*I7jNSFM++4sGApL4gFwl>yK_lMnSy~Kn3hb)?9oQdU?afmcR1%4k+Vt86wa_07< zNs4je;JI0jeBgDg->DS}GRGXH%(nBl5vpTsOy2uEnX)ypoez2)%swY4Co9H|*NHVJ zqN^3p=Po(-om0$)RfFnTP*5uRn0>dW+G#5_ zlv;f{(ffmJ85QWtiVMMg5faH5MN zm=DnUTaK0@!;Ae4%eV`=6d8Sk+G47{!`}t`iPy&8{yc&3tB~s3%H!qc#1}|s-#ik? 
zP^LA?lg%S5pAU#CR;HOuDt?W|m)s7Xo@*3eGQR*XIG(zzA{Z3_`9aXI6B77JzN8Ha zZ|!wa)2iv#-p-N+t2iIp-9rF4DyClk@P;nHSefMz@1BhOT*X(SKk$I!;2z{k8#Xu^ z%nf6qN#*#cO|oJdXVij{(kO>wVmdI50rkOH=_cc=|u*!1ahukIStE!SP+ ziARmxY9EdjiCCYnCgHgIz+DNg^dUuOhu|BaZ*Pgn@kFFFL1Na{p|U>Z9|Df}K(TS7 z(Rz1@{rsb6Qyp=&gN;8xxy-I7TL`iC$kw4_Ki9<6(`=t8lOd;(`l*zIII0t!d%BpSgdR^)jGs9F5%UCmaDZGja!rw{D=>HE>3 zpC=!dz^yt9Z;#LZ{l5StO!XtU@aNMW%6;4WG4$lcxG%rEd<;ccc=s+&EV2H;9n}y! z#@F_rgUMefGiV45;A_Y8=A9zbN2O|rL6?H)as0?8wt04;0l?4pTAFb$Ij*J8a zsY4G{%iA4jabF^2lk-WdP59#GoPBd38^+7zIM+O??p1hpx%~5qJ7;SlNG1`t8*V2O z>nD1-%zngkST{!;R0@>7C*8BMG4xgCq`}dpRP={hCye3BHHRm+nw(H+`j#a zVUGu#cA7!vW<|H2J~;DrEJMliPX0d3g=Vp~s{Rbg(?hH|=($YgTc@xpcQfMe0bdtD z0ddN`^w6|wwryhuz3Dh_UMgD+TiK22s`BUORUvi_ApGhbP(4$7XK$|Q}TMVC+ZJ69Xgo^{O&h?2F9GSx5r^-rhr#E!u<1MWXZMZ zo%(CMHEBq3cMKWEIgI`gtxcQ~Pt#*(Ixp*JN!EOg1n-2#ClwGaI$pae4RNFQ>kZb; zBSGQFqZlKe4|Re+M(YtJNW631%$J~kk&r`hm=3(xk^+K$%apdDcgsuRBc>KsD{@6Q zWO6iW>7NBf81}n!O@Q~RR&6Q`e*+EZYMRGUy>~BjDLWP2fg8THw$b1{k9LC6zle?G zd4G;V{*f%9;$+SF_bbcK24PRw>GNQX)=Mae4xC<2G9A7V@9*&17UoMz7>iTJ%5!qW zRJSufE}*Ti97n3Ux;XLB=NG%@NU<7t4$LS;n29&r<6^as3EN}ZOjY0RckM`n!@@Ha z?_xJaM>kDiG1zIncaS1fwLk?1yx^zcTr`qm7{Mt5&tf5r|($pS(TBX|`;b^EBRW3ap@U)@bo+pgh2 zx~aVphy}5RukQHr5>JHo>T>gnIXYUUe)WdD=l^t)h`V0&0GmLIMXN$%+sR?3Mp_xO z;1bu;wWIMVsIlB)s5j%rOL_x;+x>B?eQ6O&u;H8XT#DLpO+CJMFLS<)0R3*cnA8@t zsw($olI7Q3d~N4F<8{)~yEmYEo@+rt8@Cq+jT}9R3+dTQ20kdV=TDQd7oYOeyWi|H z;)DP8p;83Lv!`8YvNV-?sI|6&m1>?qrjA1^d-E$>y^U_23$D_`_t{4Xw;wuIkseD( zT<}_tv~kh1x&xqcWqcly7|Co zk6>iDf{dPQ*#=9vDY}j2$SgV>He8(0}Caf2O9)F(( zk*&&8Ad%Ygm@1wtKV8^^zq>nqUvYIfJ5g|jnTd(XG-?F?3T1mvPL{xr5lPif<;~DK zP$MskS(8Xtm5XIE4_zRp$B#%uxRiulzT;B8MQ?&zLb1-JM7zj<^sL4NrJ}QOQnhvP`Nc{E-WEb-Rr*2j?THz&*Le}E z+TvEJxq4?-;#bG!svDvY+SR5%w1TY8QxWP|_bq<(MZdc5y9;c4r!UDwCnAU;FG87U zQFr`+3+1RY;SH*LN^bHw^f*Z`S|qM_@q8MPXgPn`9vU7nN!EKaDaWc~9q zvt^R-$!a3C4%-I{n$(F(j*1A?#(T_&O1j1$O?O>Mk0H13B8(>hP>X@;4%P)Pc)?Q> z6sx}7w=1Li{-GObPgsgD*o&o+b4C$?{-d^1qP#{GlA>&Yb~LV?_o={AV5N37HKjX& zW01Lxw(IDOi5>nWkAj$;POi-l-8P`ZYi8Cc_z 
zm#h>Q>QYtWQHqzk@mVmMge2(4KqjxEBPekat@xJUWJGaofbQ?EzX)@rZKv`FrF}Bk zYh2j$Z9}u|F)JhaRJdqLQXdK}M&G<8CJhz>+Xfv`Y5=IXDQ3iK?&wL{r%)M#``@$^AS0yB^Yo(>x% zR1_N8b-GTjsNh#cBPeykZS(#E70d1s>dqaks9mmvfZI@*nl>m*Fd$2v+qyr3_woH3 zf%;4X)-$*>&uukSHqZeO5@mmJw&)ah9!mWX=z@!|7?ciIL*`Y*|KsT`1ETDs?QIwb z7`j73x*HLs8)=d5?goK}?gr_UkZ$P|grQp+B$N)N5vAe(JeWlJEs!VD%ROCVu7g{r=X1n=L1Iw}BEv{R#ZD-crrR0Q)i;oM zh9K)^u&%K?GZE|FisQqhiP`vyz?$quv>HYQnbox^q()!|jw7(bAlJdhN(o(Qb=K~J zpVDLg1Y2a^3ScSo5)hXJ-;156q6hTea#ZuvO@Hqt+%@o~=Zb?UK%D5a7D{VraqmV2 zzKV~GMXRZ_MFivLL+1y%iWcukiC}C!E3}#r{`_opAf-cNl^DjjxjnpzbX`tEc-eCV zY)QcH%vM%i^9I#12p!sLKb&WD4+GV&k7Do$8J3Ujuuz>9O#8zZW>oB=H+X2%aD#j~ zV?L5Aju5&DI-4q(E^j#B0*%)(wVVkLz7v8h6m}KH0rWMnni6>xjczP<*Tes9KZyJu z<7eNb@Sivqrrw%X$CZVtzaQqu1>Pus5;e5dcL_9`xh;r%;ij^YtbzA>Q=g%Uu0n;H zGdn5>k3|Z`LPolc(ad_tH%`+gV7mK^!D)mfQ{1HD;ZsV(#+8hBi-VSTyWc`_zVwi3 z@p~184H|*ZNga|p{8{PryfdrT0;oSdzR{RSDdo$dGt#A9#JF4vv2;_rF3-5D( zsSB-jTsoXQ1gkpk(}>wdvfF>BuNniey?Q=j-f=Avs;ocTdW!m$xn02})}#KbSGeGQ zs%n8G~NnIh3d1fom>E8VUnKG3znx?FNB$Z5zv2C*PIHKdgm!{3!DyvC4&jKo5Jt1_^wQ;G#V`A z1w0Q)^mCvFGQiz0jphr+?Ir%fB{!Yde!va~L;|ko=nI)aoa5c=P1vTE;IXs)H{?A6 zx82tVgHGjx$zYs^hYN)PXAT14>m?*(oSqsGE=y+B<{(a8$fMD#v8`XWU8;a6wC%oc zL`4#0e7!X!B!w3*e{hluC=&BNfZx}q>9xDw(aoO7? zlR`;rnbY-lNT)y+sKla>#H~Txz_BOa8A*9lF;k$HK;NT9Qfdgc#7Uz5H|1G%I^Xu{ zcX5KQ!-$RYrfsN}pUS60?o^Z(!!cv94V3F;!u%lj`$+;ZL>LwbyEVImk)KO6$d=9T zEy$@7zeC4=)SS&j?IwlE;3Ti7BS_4lA1MUWI|}L7*hUXnATWr(nKVWd3O(jSq-Xy< z)(!kTM0Sn#Eg}zBTOr?IkT_k`qpscIlZLCQ6CTe?@9S%anEfD>|A>a^de9ZHDihfc z?@hJ5dv~4*892TnJI?fK>H9^|C7B?69wkVDt2?z}w($ET@X=QZ-CII4Vc;SEv-eT+ z;x@1Z#g)kk579xmvZG#6t8F-5`Mv(jfPQ_dHst{d{%b?+01)C(@;8;V zXlYNr9ih{D*6C|912yGJPEa({jt<}pBkruC7Vo*kUR!!2v$q>T%eL4=5}c2e&Z*{4 zoQPIZ!%LaNb@245wcXta(Kxh;P>&9Q4HHL{Ii*7fX@!x^nvZdgn*9%6wp4knl87`c zzFVIXNe*V$50+y{GAtiA{JADWK35Z>vbg7-^4x<>WtsxT3NX%pv;1GH9VTWN<+wB# z{U?Mb>-u8iN5;B!@+(J2*KJitBIS$PCbGL$+TcrdQm#MPP-IYL6)8{7pey! 
z`p_7QcyKE9=9{9boM?5+e!OFDG&i2h<~>%%D%P+@t}8~JTwoHevH78Ap?GUrj6+A@w|fie}(cK+yt z>i$`T%!=Tw1kK+&Ksb!bI}80IA{dYxaCi15SeKe63OsHoq1fn!VH;UeFMyS^&i(NF zJAth?f*H5f)HnzC=YH+g$;o=D7rUmXJ3{_v$vH}C41A@?rQ&)iUrvuWGYTFOms4v488RkfoR^GVfuw7|A8FcybWaA-c|l^R`$5Pm_&V`#zYt6MBI)FOcEHJ}D+Cw0H7L`JHdeGwZihpQXE=CV36xbUnwdjH?N| z)f;e_X|^C5$_@_i{rOp+KTE{dv)mBXIdpPSng2&IjX$-;Ho%7!lfyg)qbB3mQDoJD z%}0}<47BN}-@YEybQFW}_lemh|1GEfGpqaV_;U~4d)p=q;bUw_z3j@tCzdu|h516L z&}NY{8&ihzgZtW*_?cKuK{q|iDnXlI=uFh{EoNbg+z-n#i|gD$Bw|CX06^ooiVV_+ zkivN}tn3RUU*48AuNtc>zG5fE)`*m~+MXX1h`odoxrq0(+c0(}b2UOd?AhYuR4(;V z=lMXFqp3#(Th}TE5zxwJV=4y>U^o|jynBBQeC8E2%kXXVs)`{Nhx{Dilf;MV<7hpx zxzex~mVyy902C}rvnOs2fXvct8Arp`s|6tM*A{2X`vuBq?k>Udc85Q z8z~skrQYi-GduxYXKlZCU_Cq8=ENmKA9Q=Q*?znbW(BSwrlLQ^TbpU0{95{2i;b5| zX3jy7q=tOORILt-pbe&d5vQ1ABJuP%BBgc^nMnCjpt<4=_uQ9(*xW0IZfNTq2}2+k zeQzH&;-Dg`oikh94X}w!D9)Q1IxqxzyvH%Pdb6UwWzV&z0%)eQNKAFesO>2 z`Pry~@>ugKG+*lFX6#OyPveVS{L{YT563GW#?GQLyK*2lLkDrO`EOK@?aVE>UG&bi zlW&8R6PrKRx?&1~1JVe|_2xrgqN8ms2cDgQk_scTys!&-i{6B{JTx%zSK7CSBk(DE z9RAw#C1tt@AH;{M*8FV9vqdQ0r~|ldkya>5ESbS8z@rY5M~mBA6~&G1dX6qTnaNFy zOD?=<1#xMtn=3>5+k;M))MysXYxxpoj05E*ZDyp`k1~`{f9cF#leNzh#Hx{Cv(EIM zzcCd3s^@qK+as>5D2VG5FagnH488A~ojfy3#4UT-+TjO29Q;c%GS0u&J`Ohk6A|ug za_mZuIQEKNuQ2>EbPaDj4v>%5ukHHc&{zk714)mnyq(=P7%7(LBYR!j2c$83j$jZ+ ztPB^Mn3#XwZo2RnPyuVA<99La*pN`$G7!cq*N1+Ght=B-#1P74xA_#P7}Y-GWlu*3 z%4_iL%Squak38Gokv&2=Usv&td6hS|u7U~SxG%mthA7s-H$6V+kWY-N+K}sxS@50m`u`CKuYmx*vp-$r+LtNn@3Xq+b0#g)wrEvrRBjGT z=W}{iM&+@TzY!)%*(K4Dk4fOP&<10?CN`a-p0}vac-el2k?g}Re_w>DkT94|)v21e z`q;O(`eo)i0BgT5D^B4y*XPp^N8hMQ*OXXP6LuCG$diLn(uJ1%KjGe?Cfiv*{%O+B zOVlu$)A)>JKIgw`xt&JDWBz));uttUy*or-Ba+Vh2<%zTQokj95syfh$r}w-aIJxHE4yPSX!M=>z$MSR=`t-0m_>yz6ZG)EL?(=$5ld|IKO+Db{s`p(!8kWRy6}ls|Rl5 zO|4p>q@xcBc){zr0N(3Q0)eip-%#&l_ zITGl4K#}#QBMuNCQWwDe!9DH!sucZvJq-4prHN{k0J%QXwLh+P5f6D_--cV<`il}m z=&SLdo>Iz!+KmIV)>DW&*Lc(am=1IhXu@1K}?w~G=)dEC37 zXe{W~oqyQwzx|CxLg7$~){dZDTm^5kQi zME1l*)DI_e>7gR3s_dTCSyQc+=b 
zPeu>WN3<{=xqjXp@|$2pIAT?z-&oljlqVCe#rYEVO)vXxqJZEB_)~BhR2>laN?@X7 z%)}A-zZ?%UHA6Zo3rqkOcMbyGRa_fS`pIO>w4l^J%yI*vU^>fYZc(=0)T`6LqZj_%(@VRqgD+mTF zjW8kd;;QzUE&M=#mksD7R3R$-8f94+gDRa2O=LE{)upjZyyJzYxhT?f!RnmQWobO4 z#Dm3J?W$F?68+3AaUFTyHR;j*_wrPk2)qQDO+D)vp@FS7cT+>JfQ1tTnALVoj_;f> zg8}ME?}g8Pf%vYAL8Jij45)iWXlVaCm!x&ugN8m04yKYM@ql{2clNI>+T30c;X}Vi ziorAsWBLujXS{jzZ7{`vL#5^7$NC@_296)U^?jT$r39H$q^f zIBX`230x_4+2KB20A;1t<>#%LY&H;6@_f}4A^Z=QZ^V)@R3sCuNd#NKE?R-97d|B) zTIIs282j#}FA84Jdb}?Y_4E%eTm03bCET{&VC6xYaB3X#QL_8<%e6pG@EX3Hu~{BY#yxb*_CAb_pFH>`g;CNd~+Qg`0mz*$)~IDtN&*5QPFy%;Jme z1V!MT+xeIsBflHsohtu*IVN{|xn`mSxCA2<rX=_f-floO2%}hix@l_BBjiVj{l7&dIMCJj4!@feAQ$q7Qw> zv_KLxsobDm_b;)x?fP@UEmm1AZt>W!)Z3Z6bd&gLn0O)=j}l0$V5fQ$Htp3HKSgC_ z<4CAaMZ&y^c+Z|7U-al6hrk(M!_eE}^yA~Mfi%Gi@Lk;~Hf{xq0g&Imc9v*dZ?&b99_(^cQ+uv7W-0j2ZAWZ`*UXAQMpF77HQbOy*o)JZWOgdHotNRDy}}(cWP{X z)neO|aE$jQUuNeQ&ZwW8KpiSP={tPE8!`3T9KM$trt9qZPa$}K`SWJAn+Xm&T2)dQl#@Y*{nw{=Vg#k>;18#s8&<=)~xf?dF6P{#~^ax=Sw^Dpb z&L^|yhc!$F*k~#yRLXlEfmpXp+;da_M}%CU6K(zJD@YPksxeZ43ey;DgdJGHf+&+AAJ|L&6>Ldv z7{w@5sDAC5{<=27i6kza9j}=RHa(5lmR66yAph}CC>d3f&4ZiulTOX^lln~EAqT#3 z(j3v*&?xat4VTa@ug;^S!#`AY@c*DM2VG_sdpiwxW)^Ez3m~_zJMuf$1(R+xq#r@o zk+JI1hlwagu=+;l7P6O>#Fd2fhYo7Uy~!2`#?lhUU~kfe=$?>Xtr9th>MvG2=kul2 zviG9vW9Q00hIaaYvnRKUJGFa^361KCKDSKpRCXkit;(_~PVT1k58etRGMR=za%xj3 z8i=r2t(>{YM1fVl$A?yF7oV%@4?ipAANx$cH>=uSS-p3on2k#}PmWhf3w<4-YO&8h}B0o*Wehmc#_bSy!q!+4G-}} zekcYFhDCQq912ssyebyg`O{8UlXWm*7q51c{c^BYvYkH2l9Fhxr)$tg*nv1w+ zncjrjcsr@T*?KNc$IZsGPgmwTF|YUu$7mfPVdN=?Tt4-)&Dew?B?=*Qgz~}}w5IP$ zqjjdRA<6n-)|}1U2a*sc8W$j&qld72F-yRzbm|LtmK`{`6|D^OkKPx1>%3-a>tRsJ z%pY$y5kyRTLANbE!N`Rm6_x}%WGGrTL(AL*8okY7*DHM$dDL}a5h{M|Pv!XtI#djov(OQ|>R2M?-^-L1 zt7n)e=F*)-gdc>K$LdAdr%xqjpIEtV%Ld7hF|Li?lbt#r9?Hu~^y`!~m6J9547e(=YgDC2&3IdGe)Mym z!t@p5qMcqfa^Kw%BTnU?q0vE|^;0nJRf==1-A)ER(1I1UHuoJUz3*itq;}p9NbosN z*?HKYd@UZCMMN@j+U9#C&vr&s=Ro;c0R8L5^JZ^S8X^mOUH|r#nv+oc85!Ay#PZtW z(K2dh<2t8upr}wD!TXvGpMr2s*Y{3P#%;WsKW~_{Z1n49xWkBH>~&j#AnT75n^^L8 
z<6J&4$FTb^{AX1`$X~^A_JZsqyZ@?c%m?osWnXB`CtM3AAe>~JI&Qw<r>~5Ujiu|Fq33JSL;}gEAc=@ zbdNjzD&fv9M)w`byndXBT~Fsrd@`j@L;O>2aoqy4e^OvN;Qec$Q^gS$FMll3dsVFotPs~Z#cyikdf)~Wkg5@sx{ko`L z{{)anx8qfTr}Wlbt?^^bD(#NzK+o#BFQn7BcMTF?wbc@pX>`en8JOW?pJ4rp9Y8nI z-iiS)fp^u24jCi9_#0r#`PXF>ZApploY~t$hgIsT^bh1OzdmK4zyw=Ol&}gs`hbZ) z(gitH05747Cg_rk#*8Mln`}G;Sd%~B$Z^p9I$q&T>4>FlzZlEl%m6>PV_*SHd@X)V zkBAR5VL&k@Fg-c2_SMbo2c5KXT`5+d{6`n_=fj)f#73&<2S6j&qYwchhbLx!@Jnyl z6aA(C4FdHYqe@Qsct~#NRNX($osVp1&5bGF&Z|q2a7%G)0cX}nWpkn-j4I#SHs9{; z1*i1C7RTQp3;J>7>{gs2{v(JlskQV?33KbdMd$>+%%AXZraGwVX)G!f?#eKtl{MW=W~zE z->0?}9TVYgmd%%5(lWZ!SV1+-i^w9DB43MtM?=8$u(k}D{*9|1V zika=nE4!0xj_RIaiPpFdvVp9a9qJ5~Cf7li=f(0VPXM)$5nhJ>@ZpUoOImjz`(_E` z4?xSsYxcPT6X;XAC3vd+*hw(`r#uu#0;x1AAR0U%U-gwx;ueNRhI$Yw1A&3Z4tD`~ zVMYLHVSAgV(Lt1Y9oRNQwW0DG%WAtngPyjg3_Yi=!L{f&j~4Vg&Q!_tyy7Z+`iex9 ztw!3m4(jw7icX&qMn5QoRrUgr$r1<|EOeAsR8M5xQNVL0ndzoCc;U3$+pr>qz7JY&E=b?!N&#YfGQ zLCAdxCF+(%dpY`!v!+6MKQ3=*%ZxS%NhcOWlHQp_v%h(v3^~IWf85`ei}7(}F1s=7 ze=FK<-OK9*O8W0wud~gT&pQxPBuTXKx%^)R@Z{B7cubXf??j{I7|31jfbm;<73YCu zSE%);PZc3~Rv9)b+=yrwDIkHss_7T_Evn9wXl0kRY6<`*Q@M3)04Xbu3T9V|YWu!U z>1aL3e^p`DfQIh_V#4Ww#|^8-tCr(JIshqOJgqPPop6xxImlJoekAm;$P2_dJ={#j zB;`$fqS_%X;0Qpv*f+-2Z8*+`FngQ_VZ0#k=8;^tWyvEeo{hOKqXm#_=8Q4|XrV>G z09e&*3i4?T?{~*hbAk`3k!mw$t94(Fq%bC=avJ|^;(>D5^bxFR<3 zgFb+MZL$t0Do5cCs!%NmNj`l-!hkW&g1{6H;~GZNZ9<^fwHgL?pC6d&ByM-VN`zO z<&iA)m^H1Pl@uv;R;PnSwJuA^2jk$f4B1^hp+5NB89s&mG@(#AxF0QRa`hNA*}U^u zQVmgZdHlfr14OCQD#XG*0Z}kTkC2sYJlNY;%LefP(2kiH?`=YHyZR}-tOc9op7tda z9JI;(YvNqE2W#DdO79H0%Hh)eK#ncqa2Rx;;0eJAC#G(ycQH8uTs zX`Nftf4z#9{ed+M>$U|0He;vsPN^Dh34L%dz<|#FS zMwt^g%%60OwE74X)p+-)4gUff4pPx9lw@%j#Au)Lt7;^Ua-Z@O0xQp>|56G4x|4>a z$?P8%-wa6|coyYwy)UNUJaOwyiBbB@)Ld%9My7zC$`0t!@4f|*u$g;`EQIO4rdZB| zC{|z82)|JIh$A}nq5uzh|5T!HoD>f#%AFA%j~!+Ytf05>Wa-J}J7V-%2T%b=4MsL`OGj)*{f zK0YM5q>q9&!AZBS4)Kbw(yS_lta%MkLQ|M8^m!j6WIuhReS@X`vUK6QMo#=>oTQ>1 z{yQ7jZ~ieVm`UO$h2|ydbTzXehtT6A8pT6=Y*~e}0K8IWtN)=p;Ws^NV)jAW0e%c- 
zb*`7RarAl)jy$~sqj>Zqf4<7WtC9%5>eX7Tg<8e8j*qi08F=0AL#guBY~*+P$;56Z z%>LryV3~ZgY!yS~6~r;(bN*Rfm|Xlxd2v^fi_EMcBFHm{UArcSfWttit~7R*e1qDc z*=AZYt4W~VZ=ng}Jw1S4c2*~y79umd*)KOPj-RDxc`vE^8qvRl)cmuRC<_TA<4@|A zrar&x2UVDCR~z`rQb5>?`9p+Zs1}U*RNo6!MGbOqHnoxyR)@&O*I}d#_NY`3Hz{vL zE0%j}S`tcRx!y@lAxPJ4pe@%YZq*yn=ik1uq57IRg-u=T1D@No_6FjEVZo%J6?Yqjo3kFo>+J9Gj}>lH2JZJTi4qXm7_Eg!rsAVa+cJN-%vfm9u|>1tqku;2ueLXu zAL}+A4Bg35ks^k%51IVLP9iAy@bm@$bZNsM-g7V8OnWLM^ZF*+>5IHm_x-=bcCrc9 z?UB`e1#lk5V9DW_vxyHSG-$o))mttmfNd}Gd$`_n8)I*6qim6~UzZJ*DVhuALu&?z z$$LqJ$^B6iCjGX291nXS;Dq#B8@L|7a zbW10Npes2f<^UXpQj|qANG7#|j2sdt>yw}S4b=Wun|vRh5@IkJgG!E^dc6tL-LW@h zzvFC`#w*$wO2o^OcL|y)1po>YUK>;?r&1aOoIGYQC1(H?q8C-h>^ZqOLi`nh9{Chb z080pw4miS9v(WES`HOJ#ueqf~rnssL_a)YV2tt`B#!a7$B@G^k>iQzw>DC z;ZFJU`{}IStqv+`p_oti=x$C#?#VEo0vvCJ$2^xP6CfsB;CqE}!o(Y83kHalk8%`x z1CgG61?b(^WvZqMAz9Ke4$yk2+k1ow#ATb)1u?mTl=It0KE*iQ*Sjtl7A73e*4Tgr zk_S<^b-$y2s@52v2Bo(+3txBP2O=!y4=9-EO=rN@jRUm>@HL!M>0xue5?`1JW;}q6 zRWT_YGdGtL6Yz3e7|V6eU-Tw{=jM-6(6g@#aVZPw@|`7sf_?|?t6YO7h}`zu?HlH! z>r(%-i)FQc2J=GN+?$b*jjN|tu}eM4&WWy8Ztwl4TNpU==kK^Xif3*$I`J&9s|mmR z$iygQYQczn)!y{U5gHw8hDG;;5IyPmEItxtcBcM{#%3E(8DvUhclj;pYOfSWU*^gN ziL$HM<0hn|yqxB$52;8~_tz;bh?Iv~1p?uc_(=5_TMFUui{}xG4%v`Bs_0W(Ek7%B zk#u42Z?G0(in7yj$7dVK3sV?P2$NlAoGDKN{P@xN#2~1OWk&enjNsKf1_^3NvP1;x zc`}kEj(HSn8xKJYS6m$i{g@8yi=e38YA`$W&0LmVu_{Uq?kL45p>lKxfB{=jhdygq z$D;%Y(z)sXD{8Q8J+an*OyABa_-L?3Gu+)lhNu5T7|eQys=ra$@7d?#j2 z>fKz$*G(~oiOZLi3~+wkF&DsX`wj?;Yqyg6=^<2y%Ig;{jRxgPp7rRGWVtVhtKS+! 
z7Q9NyQvE5f=XaL6WR}EIh6NiyChq$FB(T;17hgW5yhi%+PbavVGP7Cxa9+~HRRkQm zl3;azqIjK+hRKS{v$qP6mCD$EJn*1!OtFCGCi-dVWYEj|{rvr-gJt~`wBoZ?_VuDL1Y7hfe|vAQ zs_A04`P&kaL{DFr`O}du+~>HtCHRK=GzT!Xk*8iZ9bD^@T_bm;duNE7@#ixlRF7;5 znNLC;QrB>}+&o5%ra1jk-R*o8>DToG^Ai7NLt6|wpH{ZOdxkCIWb*%p1uY8GcE+#K z-E0qb(K!6-O&x<3F}wV90r3YCMHxKP0St?$>?fUnRIdI4K4EWdz5Z*z2kxIa94_9= zUwvCo8iWNWsF!X`L?RUN4f5~5=sGnxPV9PoZz5oh2N~P!)ZS!eU7glEXy&=jWB|7; ziw<4ld&B=OLtdQZCU@sta}fJ|8q*v3cD~wnFK(}BAUvT$L_kok#I=-Z^2#?c)|n;Y zlXiN0W%m2+U95@40IqB4AdZ*O?cb?I{RoIW^7F;Hb-Vo9l`^>`m}vkaDgEEk3Ttc8 zu$3;_1ej0K%<92e5D!X%k3dGUChv?>GL`#irlxQ%7qR5yTp}qu13^w&QEzFC@4T2% zw4=oG^90M*A6+2~)`!3C+)#~4WrL1QBRtAECeHf$k0u$vjm7=0wHV64oB?KwpDq%` z{@FyR?Cd6Sod5N6uo<+e5^w8`25Z2MzD^ z>02GjQyy7V$Y(>28+f9Ts8wiFHlh*c8ya|y453@iu6q^86*rrmofp3ZO4e2Om$iPA zaSoGxTC5@|Z(z;V*Ha}P6hnh}=*l6WAsQ%baAeo0C1J=YGWS-#QgbmUzO^8r8B>5O znU1K&2V!1c#7qv!%k+(W*;Jfp{DPA|Q>gfsgdsAXL!v%4t>AhZ|C>bz@R@rzI4WNX z>X5I`Y%(s^sg}n$h0iIb}U?s?CX1VL`7DZ!ZVuUqLEX?=?Zkf zwUqHc!Xy|FRjouv93q_lUQ9qjZ;sHfF;X=$!EI^0(GA`P!-T7*=%TpSDHUt>chFGJ z1^~EeWmVM^F!T3IcTg(a0I;O|<_Qs7Qc|L(p+Wa#{76p5`z+?1tHGO7&5Fi`Z6irb@2Ofr5gZ)KGqoDetR+yb%bDlYjGIe|SUxUGjw5fzl9hHWb)=i1NK?c6@ z7p5<9ppVKdmV{t zrmT#~pZ6>dfrtq*3`gQ;XL=Up?51vt=q8Lk`Q!wq8h*YYHcY6?046uSZyx$xaso;| z*j3A58KveKJMiBgI8RcwnS%2@;UoiW;+bNfSKq*-~F8UT*b@O(A<=@M) z3X;KplXQCu^)s$u44>sfKgRj0Y%Y6+8^d&5F^4lF?S)X0HLGCM~2B1UpC3ntcA+u@kW3zk#Kq zSoAfJVQW?6*GQ48F~r1Bh|PUCvFKNCKPn_)#wWUQb91A?w!V0g&X{h3BW_%FGDV3Y z>tIO=J*7q!({EKQZh-LDDH+1?nh_s1ZE7hE^6{Dne*KZ#iJ3R#L?cM0K6qPI?a~+gLArKHba7F!?_D~^?yZG}&$E>>k z9vhCP=sCo&DqX+?7r|cre1ghPh&mjEM4%`U(qYy@P%OSts)U-9HvB89Rh+Yf&F29=vS9cw)s1!_)|v%J7D&7^Po$uABOjX^_W2Tt zQ4DBKD8=Xt9RJ%6O*Q}pa93!tgF3;mg@HeB!nzFzFuPZsf3{TiRqOKG;F8h<^c`DB z91T$?p~(m`(_@%SNiK*I_xu0p1Sax|N48ZR{zI_m4@MyB&k`9A%bP35yuynd)8Io- zO>?k#ruYWrJA}r7f-u{jc4?7>ked4O++gX|Dp@#^a%gYFBMQ+=R5(3GimDh3n^uCj zD>Da&7QRkXB)t$6{;xk8|91xjEp!o`R8=j%yinZRbj`YKO}FX~_?>+{$vgE*&(^6C zQG(I3#P+rh8#UGln%H~wMfKxBQlKcUmUtI29t}dqfG{L;+q5Avbui#9p#w9Aod5Wy zFrt0+Q%MiW_Q91 
zL+Z~Znf4!0PCg|s-=M4ATZWD={b~NCY<+9MaQfuGKo4KE{gG?O$56}keI;~27qXyJ zPa>r6yW8BO*XI69jW9zdR&iHYSLz?E$zdsZsjne24oFK6Axy|~^n<#lD9!K<4@i@Z6tit#8W={Dg?4U4Ozhl;i zqjK%I!cl@|C*Ce<6QiM_H3VEM>$U@*UGCuU%|{%$;(QaS;WD4r*yVx%cL}oga?RLN zQFi4@_Z@mI_C|`e9R;bYL@va(si!j-NNNY0yZA&LeZ)4{e0k)+XpJ_<_u2pXru?<1 z#V;_0Zmi(L{0EeqUCKxAI_U-Y3oajsp}n&1lw^VOM6yY#yJ?Sj?OWlrpn zZUb@2z6IOrqPJo3cffpBfy!LXL?%@eyd~J0^!DL_r`-j}=kL|y$FT+q;tuB+Mt2l{ ztfv?Y{db%S=C~_qy_l>h1g&rNDQ&-y@U@Fw2OlWdD)8Z@4X1olhV5Tn9#$sThKVHK z4TRvEwE`O#Bjme5vO=tih*=byj$uo7wJcw~hs|X7U;e$}2@1U`%FaRVZUR4nYsXg; z>@839I=~bg@FV~CPQc~B(oK1o;&jR}V=PzuT3;f1UhdHE2RH+4Z zM*;tYH5tP?TLo?q`bMEltXjv&PL=&*lN6sPIqhD2UevV(bc@zAcB$WNb3-Bl=JH-X z0iONv9rRWe)zwO0vQ*VrV&Bgtd?FxXt^KOq(Zz`Qy$Br`i+psO?b42B3)&8`nK_IO zwVYW~Lm9!fH%`@6ox8UPTIpqZKBJGA@&!nlUbD-|kh-Z7Rj2x>l0QF1tUX^O{!T_I zdDymHTK!|K_9)}1K|37*&WMn`W`1Y$b8}F)W~_Q_CJmc=vCU8`%X**jh87+3CBbjq|Df*LPThC! zKO8h)`MlFCX-H@&LQg4jSzq8UQKw|EnA3(V-LyLtT-r+K zW{-}HTF};c+yfAYdA)BAs3^obbSW^qTlI|?_+Q{Tj#63kR@D;Cs~7)9QJZGJJDJEj zgx#pAX4XvjY0c%Y)y;v9yVtlQL{+=IsZCjgYO*hbqTx-}M?PF*RnTbcM0$su#mO6k zKB%~>OO$Ig_m{btLgVXFSkK*$rT1CRy= zA2CPrk)A9vm6eqkc3h8IY#LMnlLt!4%0J*=METxDkXI(g%Co&33;Z2hy5t(nUEZz9et6FPkimY3_{9ZFa(h_ubcgam%U|6 zdAoO){g>}}jmjD^4!v|lz74G-EfpzSvheYA(RNHBg(2M7?9VoEe@3kRK|=MC2}&ob z9Y)!2*P<$8Mm(siF(J$ij;_Gmq|(v1v6OdI+7EbTpkFUgsS-S|!ljNjw#8XhS@W=C zICIZGi>HOIi2ko)n){`eZP3>N8j z`XQ)I@a5&JPfX!R`DERqj9(w_jX@VJ-_i&6JQ;^2o$=5HS%jEBtwI=WLC zsgLr6#^g*)#+0vVJcPuO@J}3)mpxXtSL;(Yvb(fJRhKicREJ#a?kDawNLNph_&A^T zK+Vn5LS-ukFWUORVrtAMd%ATfLz~Z6WrdZ)`Jbr1^ZL)&$=9{`m=;z3)r(@Fg}N%M z^MNYh38YnYLS4F$&4ah*9Q@^CMR zJNC=2*uQpE3?2gm19pM<-PKLBL4M#M(5NId7FTf>3hpK{xDbQOUA478kR%{I%V7Dc zYWwviV;`wIP0(v3&WF8REme0jyGk~dFOdW4T1q=Zo=M_7;2SL;fmiFAiqPO>8cuPn z^~HKUq!*IG*VZ!4cD%|3m&S1l+1c4?0K&*x!$5>RxCT@td9XS)Rms4}Sm=pa6k_&< z!E30Z8d^KKv^j_B^6RySRVS?!M920GQ4MHz>$q;33BXP=wNZ9)7h1mwBm^f|d;F)!fL1$y5m5ozvn1xs`m*3Th z0IBxsSUE)dukqYyps{bOd)EmTbI$u#RS9{7@w!GT3pihSmK(_Mm#%Ag-Ku=NMGEmW*`e0ehusU@|Y6 
z6Z>tT0sgLFzC9x+g*_{pi6DJKB#>dx{$P8yJx~LGI^s)huO!t(ogq=}sf^!-Jp@!? z$bY@ku_S?2ca%#;4^aklJ80Ltr^F!+CW5T2`viB`#f9+OFSN{^|DbK{#s3T~T7oj0 zXlhJBnvP`w$(HadlN?6Sx{E_$#G(;&-Dlc~A}^X7fgWK2Jr|!B&J*RT5H1t>ZYv&R z#P0<~pntBKPA^_KpZLMoP1NgY#xcw9;T|)A6WwSH9y%|N?Ps5bB=j9pLe^0Zu{mpvo#i`dY|7QFrJ)| zL3}`v5Dqg4y#E_A@qUZ2U+Ax~kg=iLQbJ8y9;YbziJRj$O$`k>UEM6cbJfIkJn>O; z7WST_z$3l*YBNyv)nd>+RfZ~kTT+Sp=}l;EI@a!0fP<+d&vKdLXxYOB5)2OnMay(& z-o6Tb6^Z)UAdRQ&8*(Cc7gWLN{Mm{_$Qq~z+bEM_RQ!jA430H6&N$)D*$v)*tn~CY zFDjc85w$D5c6==Z85d7@i9VlN4YC=wFVIe@o}eG`i2U?iwtQ zf-I2~&^EQ6u7iC4m525@y|0(Bfa}F_lWkEKlc9HhDRz$;E>S}K3z5o-FrTRKNfsuK zI-+c~^~w)k6dcQ$Ger;1C}~8Gm!|o8L7Y>+f7_MBnS|J8ZJo82mdYw9D)tHI&%pyO0E}q}zAS)S#I2-s2|&2uP62M4#CWcdwq@d$*Vzu2tAR~h zB8?XH@D`%h(CDbb5&tvK&Ofu|I9h%>R@oedCGS2EwXQefj@T;MCJ?DIOAmtDWD%E0 z2y8oTG6=@Sav$yhK{@QRwcL7;#z=d6J1S#_Y5>?*u-E;h-lxnuSbyLOxVx&>b6wr+ z+k34D>=z+pW~%5~E4Wcd9tu8Ur$b&nW~7^IcSci0Ol*zTlmC3xnYyW=q-12jURcQ) zldfSvO$6TUWBvl0=HEeh4FCY~cL&r|qmp5r8C|k232upBa}_BK{nY-9tA-}g=;vr( z2BHIxP~jo^Mn*LaWXr~u|DLB*qbwaWvsF?NVT~Bx-7T55LuW>lk8t~L)ak_)6)XF_ zzA{4dn_d^Vyw*%$#JN%hCa^_Rs^?GwV^*ou1Blm^cw0md{Va4WSf8W^LzkJIHCKPq z<6NMwMpxb?r#izUNxT%ADZ-{6()mBIO*yy@WSF-NED_CDh-*zV{rWtT>AfK?MN*qM zZTUC#@X?U7ZvmI)Cs~{?+1Yn5>v7rLMggp1^(%HV;!H}aX=^ir%bT4|&TF9FPqn|dUg3=O`s=l(ppeLh$ap(F z@+CR#)8ch(kfjGKrkUR6Xkx*}@3aSnUQBF__<0++-C>xf6#()P!3|7SRVTW&f1q`8 z@`|y440MZPp^2X@L4etA&%HXrEF8w{stcg(a&TryU5m*MIaxe%fk}$Aj_%#=aL3j?06_B9LVD^dO?JyBWWZ`BPx(Q7l!?W`L#3V4W;4 z>op6QfcDFrZ=xnIllV`53smjb5U{XSVbVE+i_!qPtEiE!R>At)>IDn~(Z2C3xJQJ# z8mPpc2^R&|rDcx}k1dULwW&uotA?hGMP9Fxyp7076X)nIN}X1Eapa}c z;#4AyJ901G-S0tNcel^;*=L_;d-gp4_uv2b_Pv(>E}4ZI;X-Z@YI)N`EXy+Ha97B6 zodalfP0h$*V|juKo$k$fs;zn?unGHGJI~DKSp^PVUC`6hW1WpN*h41ODz?;DAXyZ_ zA$wHiuPlZiHuFANaJh0%2rv?S_Cy=_XLY`rQI5Ky?Z}S3f;JP!Fl~NmqP$K3R@ScV z&nV-}pH>`maI}X7zjNqz^KxCp1Nknb9n3K|%hBK_0S?>RqpVbGwaUe+sw^;|)oOyO!kIz~{ zCduT`*ufv|?-A`;i@J}i$vv+50~lbr1qF){Hu-@5n^jvA2L~rQ?l^ro+3`ravbSNk z|J1ENfG5HMl(uT!(bPABgK9+ivnYf2^8y-m5LLtz0!HxbrWn$>55>Y&S&(wH@|HU1 
z?bD!zqYDf957rqzbB`?eknmEiU?~H-ox5+T0v6&uGugot47x+790sv30eIdWr?z$@ zlkOG~QK^t6J6%rV*>-?dqioyt1j*_bulBD&C*MeFl(#l`kPtqcGX`!Jvs?gqaFYUu zJC(*=t(Yt!CJaX|M08hyfrb6DEG4&<>+9}w)q*5LT($5I;9glnS_OP?KM$vAs`|C- zGlOZ19t`d>#|{ObjAE`cbTlzzb105hC6+a<1A7VamWIUM-xUODqH8U0hMn#as%_XQ zAR&V1^oN$IjEL%iw>umQopz+c5~st)!$xh6Sp7oCH@|T+9@;O>^uV zo>^cLd;%z+Cm$fjo-H7-0V_#L#55H+Q5 z1xZvy)sRd1oX^!z%H$Vc<1NZrqvOTV9{5!n@ltPlkFcT0_xRqaAwW}^f^U2-^hmse zbR2A=2-dMHe+Ggi%fK6JY<$!40LqOLK1s_b~`-x`3E6-QH2>xXNMEFwLS{*C0krIKZwH`Lka<<)F2EidC zPL-D~4JlkxquAF_PnRUd7XJF{udWgpY*i?4+tX^@VevW{V24AW&&8DWvYG}UR9XT7 zNQ<7R^c_610YfWVfi^bvizo7_Xi_azT2}HRbnE`f$pf{DobfGn;T>6^dF%9EJK@|Hyw0mt1Kq6&-*N;qks}lclxFkEbXY(4rql+HnV>a91^&K#I>3cUNCCe?)N z{^;D4ll>7c)3Ot@Um|a{bZcDqX33J;#zszJ(pIATBbc_xnuOJ6v9huCQD%5as& z*ClD8kbh_2j4XKB7i~VFPDL<5i{nOp`f7udTRO!M9G?DWsDmc8#t4PN^oR-nij9GD zb^0?N0kj^P9=kKfeiY*9hHZ5nA4~Yy}JzipK zzonU)cASwdnZN3QE5$3~AQUPL5?LW}JT~#;1r_3n7k&D?)H9nL`*L&Yvw6+fqI;Vn z_U7AMST|d5kd7P=J_XKuVD^`-fbIU|ixWDM6+Hn|Ffc68c|}0lLXiZOsZjY%pU+&& z>{jf>x!pY5>21?0u}zwYiHX@A_RXcc)AEJzV+#6$YEkE4-NMqwkG&w;>ru4WY}sqE z2AnBZ1&wfoy<~d656|~xCKl$(f4a`xr8EFAbFw2MEU)b7LlWEmb66Y)jb%zK?HqHy z7;_hZ7$D-Gb35DL43o|52x-7VJV|NjP0cH6qVKshSLEqlwqD#sB$xGBow&|36J42iZ@GSnbYZYpS#D tKK9Q~&#)R?(LjMyr2Og3+=mA2pTv}D^l@#G5gr9E7nY|}&6=R3zX8*sv`PQ~ diff --git a/vignettes/R/RDD/trees2.png b/vignettes/R/RDD/trees2.png deleted file mode 100644 index 90b7bd5f12e4333dce7cc12b5e63e638d64a7804..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 24832 zcmeIaWmHyO6fUX=f}j`(s7QlIi_-ZjN=k?V(hAZgT>`!~NF$)MN=ZsfqY?sAFWo5J z(hYa+7ZmO}=iYy3j5E&ozA^m7&5pI#nrqHyKJ%H|Ls4Gx7~wg>0|yQqla{)!bl|`t z#sdd%o)8>{BlTpAgzmaPsD2@pV#<^x zzZe5dV38y5gwV;|ybsyysb8iqn}7pZi1mghqn)qRI2v2LiJ6 zE7L{4T50}lT`KQO#IIjySN?cTB~oEV+mzFN#6>q+F*%`GEP)|NjhCj(V@NB^N|c6J zBkT0+5yO;P^6+`{Vzb)h!OLaKW#Wmcr(aE!H9iVSwX^DIzg1%2-}TaSc>MArSEAKV zU1Bwvlb0E)jTt^m@vZd*8aZ8VKm47F&Ftz^;;?RJr87rQdxYG(cH`(R-6K|s3?+** z6!zSkWKz8is{TGIf``wOpAnrjB3Uo}A^gy_?6mA^UUL5$YwX!iwNHr~bQTM9d1x*j 
z;ieXu>6tAY7HC+$uDwjsAEn>;&9mJWrd&n;uC#%i+yNH&nc%>|m&ONh;irS}=N$Zj z)d_fU03ZH71%IwbYA;#!Ke}Be#f}D8uwz#x3{C!)`T3_G7#>mokErCE6Zq;gh z=dSHtxtoG|mgXGyA6V+@b2yqmL@qfX>?jC7n(N!%r*Sklv#=3#6rsbOAqYPshdJqJ zu&3CXiqPGaQ=}2MwAQEL<+#LgiB6P|hK5Gi`hkI<(sha5%i&)lbVjze4+S|n9UL4u z9IkL!S{rh52?z*qUb@VA`7%44!EWPZVSC?^-NJ@`=O%yeb6wv?&)WE*t+AyA4RYW6 zx|VjfB6M`fgZ}-uGfsU+anorlfiM`AJ^L-fV~EaR#pbdHY*59w+Qi z__-cQLNTBZ+&}VcurJ;qe>PhuSV`f%82or{bEXH;RtVDl!l%BqFO`7bMFHO zac~Jv(4Zc}564c8c11fY%OXGT9z@?nU7WqP^K0y(==V6+q{Ua2<@Zi{toWM;d;QKe zC~>@J0Wao~!uRfS@O_!--uVa6F9-=3j}55`{CmtnoNFF!`_9KXMtVZ#Ty>iD-*Ld1 z&rYT7J0JHP3(XtuH#$TAMvqHCz^Hv_-#@wUpe`C;EU7-S`+D%J6EcN||Gq8!(%_JY*?rrG8 z0yAFC`LIJjq!xUyGTqe^cyXvW6rlR=>!tFbaXUCJ$c(YjLNMr&q z&yJ9s^Nh8|sb%wfQdIKZ+WQ_MW6m(iABW~gdaZ$Es*BqZ%2N&s+N`yY%m^91do_z& zMo%h={aanMClLW7sr$iE(Y-?waLAkL;Nsp=C470Ts5DFdczW-UQ5LM{f4BN?t`N8J zpD`d_3jKeFF=&z9{e16g;aoMVmRm8?sU81YOA8^OlHZsST}{+&-({ZOW8YQ&=ht_< z!0M872M{lgvLqSp>k^C+!zEadN3-X&(-m(mrd9rYcSW!L^ZlaCK<69SxWod%&d0Ts=xs>KGiE)@fT$l1yjMUo<*(drle}mGYB5w@b%c<_M#LDKD^_X8ftK#1!(g| zFHYB$d64KWO?J!%Mr|+*|9lr%bbq70Qk0Bb+wp!;*oR_5f}8IyGs^S-dPcZ#ha4M_ zP}y*?iMPZ1qRpA@t(h+?zh7r&GPtfSRMovXdXD>IPKQC*wTWA2&% zlXefZeQp(7pUmWfVNH79H>J188w$P)|I>}IYc=i}5<-8&#l&jhQ#sz7%&6f|dpCCZ ztIJFg9~Fd$Z{vz5cBknK#89qjIh~0>1<|vv>r39=yqyUtoW|c=CKv{}X*(pMrO>Bk zFdhD_Qyxr_Kd9=W#7gW!W_Ch3DiFYeoZv%mi>1-Vk9wt#51NP6{d~(sUnewE*lu0J zuGgBZHv5C#mpk8MeC7s@@!9@`&`y)2lul>6dXeQaC0TaTv1}vyod6!HQF?uOn5yJO zw%bsUs?UIr5Qn_TVzX38V~j$I`^EsZWd5^>wvsJ7FKV&k`{uW;W=N#oKN$Ss-=S(c z!%?y^XZFEGP|xt+-2h=s&zc%dlq?s`=<{XBl6aT=dCvL3T$;oe|2S2+oRo#pz@6SC zTooPH#mS$Vrtw<~@nZTmp&WUG0j*layP->t4Gej_o z7vKHET__JQ$y3K(#O(o~ zgbxs8%Wt(AVu6^Sc>j$1woAA3L<*N>Z{c}Kp=WUMYw{B^WR?SbJL^@teWcs2P0OnN z{>yS?y>&LnRr3&>fFLPF`RCYyp?jGh{^Y7G3n^ZR{W4!Cqz9Ia>_%lZ^m3oOn6~R; zOZ8I8QOevkm9r98DX=z<^1y?gMH0xQ;B%#3_{vhdp6{vLEC+7$t<y#vA126qbel zJz>V*kMViI?tMl|V0xu^dtPjtgoH`n%hu_4-_}xY=baDD@_6CQV{d+t?A$dQZ)+~3 zYPw8i>zf*+nbenT zO_Wr?R-u4hrO=wz_))U5hO^|nhGmgm+PGnd;1#>^<}*U}W*wo*pOy$1gU(q*Fk&rN 
zi~O$1r&zioX(~ZmqsgM>UaQIU%8C)O?bTHB{&zj69H;X}elIjB_@=o|nVMI|3JA+` z7TG43Y^|BE3tv>ZH{G4rk!XEQIz6?AUGCyo>e~^l?V$;e96fW1hRU%Yy)wuocJ>fH zu~)HckzGQ|pZKBRCVh>mdQZf46(lDpM2Ti6=FAQCf~V~Wk67r82c9B6y{mCNV4+H;iu9)7O9kJ-b3)4S<5IUxeYZ{Jp$zgnURg_ zxX>t>DLq4B_W7!}X{yC6;!dNxr2HCF?yoo^C7OsSW0(e-0SaGjCiCtqfEV(s1ap>#CNYmlj>`s5h`l=(KlM zv^f}J*%E#iZ)cM_;VE70(0$DwntMnNGuM%6Sb5ek{3H<>+Yj~d;_a>F5?@ZY+1GNI zr^99jp$Zw`<2jmFY!%*SG1%mL46)~SXEe-gC0$^Nn@E5bD6MZu$_s>3&JcEVZ;UTWTOsFYSDVs>lBcw$n;aF}Ie?G3Y$2C7GYzScGm##x%?FV`%L zH6hX6+;yxiO{W_cbGDi_?LiGr?&i0rp+sLEB@LIc>;eeJht49{A6r9zZ)EAz)k#P&vNiudxe22?32Vqg{H(oUt&a2pq4obkfUx7ZO7j zO{_M_uoJfsn~K(k)VnCAMSS0dFX7NjHv3&bgxCy>>wSziW}J54h2n5XoJ?q6BAgHc z6U%A5xaa9<8s0<5r+po<LU9t zw7p34#`cFU)4sP}Jayu~=|bjkoKaEgqSN-~@Aduxmut$3*QY5q*A{EqE=%r2Y7Rm+fIYMI+Z*+>v7D~VDr)*clL_lHC1P$%nNj6y z6KT{@o0EnGcLU;)3sCg9&KtA0*6^cw{9=fcvoJDNqwilD7|OaWCh72eP#8xH8tr)m zHYt;j@Z66aR6u^o*TWQL4#o2kXD&aG(sUXMA|+|mg16%iLHUYT-Bu7GZHiaeMx{x5 zojEy1ePm}ZCcG>KR7*Z~IrT7^umu6Q2LMGVAMpo?g<& z%RIM-x59@qOsg_%>;9MZ=>pkiWjSPzpnKUKmYtL=ljG|-O})KRsn`9*D!rDstL!b8 z9-UT2%dNX(wPb4mi_r!oqgEFM5H~sc(bp%;vRL4PLgabFa4QLT5Bd(ZfQi(CR7GD8 z5+*YB$ZrI924j|D#@G z7&0u}?n!zVW=s$4slHfUR&;w~KGWUzroAQUYy?=_5XtClctCTkB(*E|aBWzw;!IAj z^MvC}(K1=*2tL+=6j{-EHX{`z3c|DK-z9TFX%DvLUHi%Hv5v_Y*ZEQ00{}r*W3eF; z`S)7@+WWi4X8-DYG+*nu^6RB+)2(AW)5z*4iYse#uIcO=cO@6QbP3Ex1t9iJ)EwBz za&O(lSulQQG|IRBO-$7@W1L^qTYp}j;Wko?@eA0xDivZL>!-Ve9kR(B7h6;ZVglto zk%(9CzBO;A{6ovWgAubSODww5FJ&lehFG(rHkvAmS-NcoS$wqvtL2<-C&ySL7^NL- za~U2w;cM5xNeyARDw_<-(6t})^;B2ZB}-L@uSjCkIude z;C2`!(pkBdb5t*>0TKsBzbxGqEYoRsvfZ*|Gh^s6#QwURHQWGR!sEyQOt1qeqTn^V z+ZC@AJDpO2-2`c5cMT)#b^-+|IuDt|n7sU??0qisjU4JCbJr`Qy~qH&0%$Lahdn!p z5Bq|=;JMA{KkW0ZEEgbEH7m6X+40B7mg|7%k>UMnY0sG_yao53ey|PMMY~7!NlwUA z^U`(hMUQA*2p;8!H68Zi!heH;S^YnBrbcyg4jb=S%f+NpNN~v@uVFKt&q|WOQKp}uCZCeb_G}JU5NLcs z#yQ-5hfJ6OEhwpW{4-2k0$X5TY-I+I>^sz~1jr*cHDk{-PmBVc|1WLZ8C*gvn7OQ$ z{vHEN)9?vU46{Ge-d>t0t9UpR ze?4*!Nj*lf!|eR8C+vMSj?M5-4~Jf-_-fp>zx9!XJQ$*lE-Cj%6d~Qq^VU*FDFwOR 
z=EM59^~kgSrrr(Q@SvunK`&Zw->YH_@rnYLpfT6756J{Yl2pidCpp?0XOp#U;E9m1 ze)XF#%e^jokReqUBTUf~Q$@{>xLA7T{&P~@sq8k3J0#6r;%LZD_&C1wKN;;SaR-ng zX5Z^Ha@VSy)a7pC_npZ_e}H_rig^u5*Y3ZhBtsHxdLMqe3U3N>BzYBvkRl*!d$$7@ z(;jD)%@bcr?<*V-G1PhQ>9OCXhmLze80JGrl;KG)m7Szgo1_1xblt(NugBd^KtS=qbNU0 z`ue=2a=utF@-w}Xs!KScL1||`X<`zOVi zyuju4XGKS+nqN#4Avm{3?8nI+H-zeeot__<^w|t2DJ4gp427_+lb2I%muI-+8EUUe zy)WD4N&yCFfIG=4ma#mAw#`#m&(t21c#9()cr?;rH!+jrytkMVM}3WQ&*3z5!=AVC zjcVO9`!Y6I!GM%%p4^VYHhf4ID2EV6T~K^D)V zcy;*8uNM@~#xu6BOTXb!HL_zLE+QX{gRCn?IIfrY(-q~CXev(p2^PrC3#TUEaF zT|o7*JmTGV(||DpYO+gMP1j*zyQbr%UjM&#%P}$MU{pJ5Gzjp zeNFCb_kb-%xa%*#?hNFXY?{-DM$6+IJNYm!hIW$WFU+!Kdx+472$yyjoaeWUQc8-B zu5|!ay?k-b*LUIpcX}zXn&j(KxjKb_BrPJ_%LWq0dH_`Aj|}|{X6R7Dn4nyPy703j z;WqLJg&b$c9F9`M{mIN+BV~lYyJXQf`pZZZykJiQ?Md%=BEbtthM==1l)EDey@8@J z4)({?eSk51U!bKuzkqGp6*%5iHkA-BxRHY_AG>85OQtnbb(j9>xpDJ|J@2P zvj64^aX|mOjRC!t=VQ`v72v6vVr{Jkd*(1@fq`jWn|h}A+ofUBKkD_YBmA%&Y}(QF zPlC;7OO_~hJpk5Vn^j7OG_n_!1^0Ql2oQHf99ZSqcPJaMrO8B+?Y>I|PXgRLIA^-= zkO?cm%~sX1eY?Y!4&WxvVB5Y!%_;ylQ_?W|;pTOKo47yH_8m${0W7DX6T9cJX&O8L zZXWT|*>~t~FxV#}lMTESO5>-}XgK=wCCJ$wPqRQd=_xBt{?vd5uj z>AzX^a&ASR6xX6?vDI;PC`2#Hb;%>UO_>4vEjY= zz(?v$qb18`2n=mMdtm4hgbSc^vRh+QplNuGH(VD%0VR%~&IqM%7++fts873FPYdB4 z&>2T!WV0}>X*wm7>o6ZwFDdQs+P5-v{Sw1<&I1o!^ysl&nhm-Y6%Z-lG&gvL&q8wq z-)W~7d{L3+xqmB`>W~bb;J2S_$1znrjkX5W$`BQGO47&D$&SoScU**h$x%;(vP@1T zqMm`rIuqQC5adzOvTH#?Xy!O3R%^N2MnNP|1t0wa7=dFSRjtnAA_4ubO)U?-*0;s5 zzKshA5?=WsDH_0olv2zJl9P^_w;V<$X0!+=KEhs|H(g}$4;X>3U8`DrO6FK!Ja=xH zL^L1Kf>5#kL@i}A9}e|}KmAXSSA0J%7XI8fYr|6A)pTuPtmDCQ*7=V^ zMRZbXKFf=vXHWSG&q*N+a<1$7pJO}Iyi69f zFMg`vpJ3cGcnsmI-dM_W$99XZ$o0oyKb;#$4nb}Pa|q|kz&?LtZ^Z5OZ(E>vV569L zWoqw{;}?+)-1)xo(%;Q=5T^w+VToLI&U=cf?T*z*KrIVU}i-gB81*lOit(xAVjB;As~=uMJTP|} z;G06dctmDvq>d)ndBincvc2_I$ibrZC8DRIavJpi0xmo(N8ub6%d7%o*|7J)0*v!G zs+v|e01#XdU9D260I?1^WQZC_C>Nc7S6>ENEd+fC$k-8JAC?IZ`}r|`3VGNyG@>Lj zs0}5xES#0N87VBl+kWpU|MZsuUEm|Ez8ztD&=>YuBlIS+DMe0<_C3|3l)-M$eD+Ju z3!@0(wUiA!isaf_uKRkfC^cWyveRC2nIx31DqW?gP(#JPugiI 
zL(vO2-=08JC9Ne{ovl!-3a({*cUfO%W71IUwSI^eZw^S?5H`>(x5wU}V+Jel*;SC` z_CkP*p2kT*Gbjr`>@Q_|6muXT2WUtp1go*dXg-=Pw?f1=z`e=Y*V}F&Rpu5z%86H6 zongk%m?LbUMuPg4Z5)F+NDE#;X8k5L;v6>f%_ap^2T&Ubnnha>5HR$V&fNjSSfGOt zOnxjb^=|-%C6jvrj@-G~k3hW=(AtqsWKUxQFnXNe?QNxq^w!%ra?F0y=Ci+F zb5?-vg%pSy&)I=WK7FzAmQfHNly9ugBQmeW#0=}jn5xw(Cn!CoYVaFj`;u6|(Zf|}gBlnPrYOI0G}JP=dONLvN*FG2m^>orGy z!YNnt>rC5y+w1P>7#Y@bAf_~O$Elr1-?zgY>w>neM&&}1MmY;G2q;RhV?o^{PgD>t zCZZ4|Ck<3~Kmu;rAOtQE{a7yD4dT1ZFR?)^>O;}vYr}tuOjlLOS zSCIwC>x^JZh!2x>TQ2DH_NSMu05BYsq>T@CqUcEoA{~-a>jd1NP$w;YX7VRe+kvDC zyF;Us*|hP_R{X0?od6S3-1;(8ygo#O1LJ@-G!WjlRh6VzVDJ>p%UlF%Iwr&V} z5loP``{DO|Zbq%t0E-xha#Gc@{;y955S|Fq0kI0Nv$sRmD3q&ORCOrDfOVwTdcp&U zZe^{lgsM%`Ms8!u(mly3Fg1TXksq=RdRHBgEB$^&@AIRgAG9K=PCd1Et`g!?ueVob zWHLH+ZUWsrB*0dXHwZsOOx(rJ$5}vZ24@(}859lC62$`YT5^_a7@l=<&4*VZxYHus z^`v~iok4;rBJINddSbp(uPuAhB>%~Ie?pv!PnfXYa4!Bu8VEI;mxOgZz zj9cW4$U`|@bn%lyHeL?5gj2nnZT5`KNS(Ls>Tl2;o0*BfY{coZ->6*l+eO-!3+ZR&kId|(Od7~dho;V zEeN>cqyEHQIsE;_v6@~-_B$xfP1>Rk5zv-)IxoY`4WoRvVTCQaaz~#60n36>U7rH= z+H0gN3&~}(+wx0M`GYZ=@te%ekkhgIUsLe!f`WYzyJ~mw&#Z5qpaAS}Q(bZd$IGsD zMh?xX)d~jA2P?+I9nn$%p7LYxdxj!j-i`->{44!WyMhnNzu$aLuD$X&j1mb0D&SMp zB;^Z^G)p0`vtVqsW>Qic%EMYv$e{Hk$WZqE1Mtz+Hbza9fhsjJhR=~mlanp-{7DwB znV^e*uz2cF?47aNB`8AosW{}{G3qZx6HS?}>(}8Mw|wIE5M_Vd;3m5%zfRs$yp)|5 zWa0mmJ8BRJV#geYho&w~G0Q&FssY0XT?Z)oR#5IuzmL5>ylC6!AGN(P(sA}}t96%q zl(3Wa`G=(k&Cv`++L%7c{BT4@i|z%UVIkPR`8W<90ry(1)jG!=<~IuyZJe!Yh7mcL zlE%fQ3T|_O?J#dw0&FiJ6VWmc!!)#u&19&gc7Mbh68j@;Ar_Aalc@XJVoYum(R$r* z2$>r>O`Ho7*XNx}Atf7(M14e4W~=vulvO;q`Ai$x@d6N~NMq0IO@WA$d7DdJA2(U_ znPXzY^m2+t$y`3`Ad$}lm+eUR^{26)a81}7g7_-4EyPV%ncp;Sk#hb93SQ8j3`o-w8mKR-lXeQwr15Cuswo!P83m; z2g+LAx^RIGH81r=rU)+C<#Fi{k8BHL{d|$7c6uaxon%v=aO;D>+wc>3X72e>VST_u*>j9~S|&>3J!-~ouViH?Ifv(DF<6J% z_z$->TgwJcXpE#XtY;-QeX$xaQPaO291$EN?@n$k+vh8~Hi}3=3LAzrZ#b}N$>$lN zZrZ4C!SjO5tcZls45ksmYAyU3Uxj&*mJ@fk7!ez zgNKV^@~E=AF_&v!_eKq7Ms1prX(PU-Vr6!~RILapb}I6Ly<0T1&@~9-+$a=~4+)Q< z_+w`%nz;*&9=flsQrxx-*CPiG3;knI9o9bs%0J_R(qhbv*i+} 
zRqhK!7JegS9pq2sdKr_W?fC1te{ijQ4P$(4l4()w8kxv#`--!MWdP0)8F_Z9_9|?* zgfPga_)JJoCIq8GqXKXwRC8?wZstF`$nF|?!5%1|4lR-9d1o5G;1eJeDi{Fs)3;Z- zpv92|@Mj^qY??B3xRJXlSi6mS^@pT2pMVx&M}&DJ=;@MC7?xi`2XTsS0IkMO|A`tm zRFkHt1F8q}bUifFgMkWl!Qaj2ICyT%Hwg#dP<>*b&j#69%cm>a>BScrn|rHAdfdlq*J zDqq7{U3nZBEYnU=BYxzxLGbi*T>=5fO|1!pLFkjdy|rHHzS-tp4u#0zpwCc7s3b30 zDK{hi43e4Qi!HRs&B1;z0A>WWVzg?S=4U_y%G9TOJx6g?~7gluo3TP>H`dS|ykd1YQMjX5?ZkUyuI=jl9=LlzZ{= zp-}{;54jQiymO9(qK_;c#FJJ=gPW*A-5~N9b zf=nhE8B%TB`14ZC3u=`@yr3OIbUz4?0=|g^1kfAb6qZPk0WLTnOtM^Dqiwy4Jn%LR z`@!&H1phdKoLUbeWjb_#gk+|xW7{@(5C*t9=^>nYV)W)c&!=H+DqWqbNIoc;h}rk$dFr#phuEp7$FFkX!Ct*a8np6k2lLBWj~h z18ie{ah3k>DYyi8RKYr@+b8xq8qrDs@ng>%_TGIwoU5n>d;7mZ{WnuM|35aBPk+gM zYhzltXGczrY#$k@lD>KLnA`F`8Z>nrFNi76K}u?H-l5xS7TR}MkQDrR)F9Jen?=W+ z8tuE2($Zc{gaigItKVhVGClww|GH33_svBs*eaDA1(TNgB-VS(07cz`= z$kr<%cK>dQ+OkEezlhGe9dTv?->~%r?Bn#wN1NuV=AUHnDXkN*vHWQeKVA-9TYA-R zn5{ssQbxpCcmhz0p{8x)4W#ICBTCyds!8(r4qSm%8Lz|EW9+U#o2l6R;fP(Hb zcbMg&&}0|$_FDo92B6Rr8UOf1QTo1@iD!Xw+bN`9s)nFbav3fhlr zJ7)~c9TX}OU+M7yH9_Zj3#kQs3qQsAB#z`QkEsfoXeXbr+(WVPgpYLWXU+cc*G5pp zjmfJ>noDu*N8{_(u;+S#N}drB%pB1eJn1l0tl7NiBJb9pj1;F{OftR!yXIlEURTcQ-;i)m-Fh7-7e?m3IiaJ6kV8m`3X#}+oSQo@ z473wJNP+c`)-n)bC&}L6mhGcx$>pG3>X%+51h!*Q9qrzy@i*U{8mRD|nEO@RyezShQsXBO^_Iyj_Hknmdr!$I z=p3AoECeYj;g2c2$O#N)A}$r2E+jKNGX-_z!m$=-^&yA=W$ z5~2G7E=U(LxanJGZ=4r~GML5l0E2NC`ZblZx3(%04&Ph3D$h6Be{ZgreRT;~5*QK7Ob^rQD}glr!_^<#fPD6_gI$ zClT=ld*T}EUG}sK{d(|tF;qF}i+pmRr|YeNjlrWi=+b{`rOgnDf9K`Ex9{ zL(Nr4Aj7bv7yM7AY_>c=N6_rM`4@RmUb=^>C!G4-$b5POI>>6*BGxv1w^w}CE+w|b z>Sa&D@a!QqF8>rCd2Y;wFhuga)pdlgn}t?A2df978^+#*8e?8RVSP^`t!#mE8*ac= z{EpgQjmkW+{fnJrI=5;8TvfFwn^cI9#pkbw%Sn5$N&~A=E6Oe99))lkyf2KYT0s3W zH40ihe93mePUs4xou2v-JB^=gKlBc~0({Gh2`gH-Fo!hw*;mjMy-Qwf1qmY3i1iyO zs*(|yXz7@)8eeu*pB;yps=ao%GwQ+@2SZV5Q#GfOw!|6lNe%TnfjmT&Wo}py48+T~ z;rrYyW<{_8JHqC<^eW1r*(VCme?ejs;gt|o;EZf^O=awH0!)V|WR>htfwp{5={lj& z=ZQ2GXmQZjlfb5Fm7*Pl#eba!+pe+1eY+iszIvdme=V}yeMQ0F2N7`B0+WNEGU%zjdEWpQcBX{aN1 
zTdC363P{Os`I=^uV+umLbl}nS?OOk|2D~89`oE9w8OO|y{nTG8MfxG~n;5OKdodmF zZ6z+G2nS!Y?Hq?9)v^MlUJLTD*&KY5v=^<>K;7|T^e-a(Vu*9-Fti#jbpaa`tW{^- za1}qhuV<)o%&eRVT0A(!N?tL=@o6x7Wm|wg-F``W<(*+znHPH+k}a(>e%)wIg4z37uyhjS$lJy1*yHC;nKa{HPiI_ zs4VMNav?`^`ji@^ zk~~wO|7*%2S44hO6pH?xIMV73belzSnbW4}>`yk+8t5jpz!Nuwf{HQ+n8(T1^n zJSkB=Zv7#6(^8h!2oFPg_xDTkpx(P8AoU}XE0nhkZ_<}A7Oeta_fGqe1a)t~kKTUL zj%A5u_6Q!+ZFL#LGZ+!W&Y6jy53EY+NpkAMwl|KSb^t2a(1wm|Sz_Z5e%7RvUG8xW zq?{0={Aql4Sokc`hHKSn9IqduVAou{w2m~lNQLCF=f|P~$f%LD{#6WVUj^|deQws2ET!b)wQ;*4sgUFtg~n}gUgba+e_w%?!zeA*sBd@GzvWPAq^$M+ zD2|oxL-N(Zx5Y{*X5F(r5-(B~9Q=cM|mq8=0-2gX%(KqWzvcSd;k`AAI$43PbR4U{)VSEfAmj z(Iuo`$!>y*d8%e~e85)vzTm(@(Tpo+*!&!D6%`R_q5W4PHoSzw# zJ>xy9AvnPeEwgOx7rrC;fe=x!$^eV5>78~NR-M@Oc6cM)wX8K|LcGPd4tB7=Z{&Ni zMQ}-noQ=C83EoKLHQ6x{q>Cv=dTEMjqb;AZ8j3wA2R!&JixyXXS|KVbn2*dfD|r(F zm)q&I?3+uGjn?zud~qevwCR3qf zvLnVU^+8aScSu?=sX4;jDiufC;j~Bye9i*WYQPJIw~1mh>D^78n#kDuLM-yG>(v3} z@-vukqR3UYW_t_Tl79qy%E^YS7*Eu?D#(h*Uoj{FAD$Uz87FXLO#7047lzTZM}%G_ zM|99wL1g&Vka36}vSwM2YjQowl(o|!&4^d%f5Lt)w+XI?|)oOAk6 zFCMG=nCz@nO?DS^bPW4ciDAj+@078o%&3v9pYe{#Lzr0Zs-n%D-||A)+l_kva;@_? 
zuIjCdX+2uu*4#g6amShe=+-PQ)a3jOGRk|0x@nOY^>swYiE$0O#S1akhcdb}g1dU& z4~^JfU@&h|pBDP8Tt|`0Zj>kXRaus87M(hk|0Aj|h~4l$Sr3`3ylRbeiT=B8Q}s2B zn?_dhLP2V?+C+wu5Sh7#YG;e`Yr2}m?D7x9B4x zD&x!6#M(q9RG7B6`FiPL(1XxKV*O1Z*@57v<-7hahtkK68+}{s+C>9C?a4!t$&7s+S zwVl(EUdksc<0x_n8E;6jJUswi)=tOnsMEj-6g5~^Fw`D5?lF*yJYVBW(YB^n*k|qQ zb2TX$vy_Rr&7DlD9J1qbHI`4(c$kg=KK1GPOUXlX$*oh1!8Ip?yB^KAQV+!TZHua` zY>afdE&bDqMDv3e;Fo3ONkY{K`*2&v^lkTSu1*<7c{^xKuXV={S&SLWz8~sQ)$vBt zOK_VW&;HwuqiAw)nVEU*7;cuU6Ha z_Bz|4uQ6tbB*!~BojF|7`GqC>2ww0RL2iudM62beSlvGfw^GFZHMP(v(gWq%Mxc_8 zOGYXs>(dFBLYv*Rk($gVv>!~%X<^6#+cQ8H$kTph4LNv?vEDBQ4%e3pg@sBUx2cO3 z*hc-851d{s6_n`xu}0M7oA9+(@^z zF@7jtIBVp}v_&DM-qyQ%-bkZKZPyaeU**u3Luboi3I)&$DIOW{}@5lw@F_(`F zZ9_B8n73@?RJcu@2Z9@%i0y9j;ra-UCm@MDk|Jw~vdp}NBt2yos*(JdcV{)6aJ>wj zDz3=_Gx7Js2uA0ysug0;CumMZ0L_kYL6BT4w&5YtSje-4bfKeJOVkLeH)C@)U1GCW zqE>GTgTyo)zS)!0H)Zt{#`Y8+1uZ83iY#w5?!i}8W+7b*Dexz??8!22(p~xUtD_?z zRjCjf&$L_unW3RXQc~zGXh{eV%4PPY*amH|hDF}w?P>VP2hZA4 zO6>?S^8JWz7!#LaUHD38Td~Tyr@a`z)2EQHPdLuk@?pE#k_&8t8GOY<#jSxvsd&v< zdgP*>m`qR3q6MWThdgK-DuYka{h_U&?nv`8tlu~<_ZYQ_XEN^- zyS1rEkCq_{2kEO72z@KXHk-uCp;k47)jI>R#=|8Y)voQfI+PACBbon*Rn9(_l*#YO z4=9pTJ;gun1K%y-Ct^fUQ7%VMbyMh_N-;>1%HU;4ks|)w>0fWl31U`h3MMmTfej$) z=rbam8?(#_4x3HJwk;+gl>{PX{Zl~NK&E)2lFG#_9JtPo_^vC4bw7ux%|I8xV+=YA z&68tnKarZb>v#d^N7`Z{6Um$ry$-^`>TLWbzw%_UO-D={y1YYQ1lZa$cn_SCWjhH? 
z(NPFpqdb%*UD6buVmmwr1!@TuH^JBbTk@vaXY19gSO74oBc%|e`%w`eElw=Jumptc zT`1yYbYdtvkrtruq0sd%R-xd&*7V$Md7nyP;M)4jqh|vm%*UGI>#wcfra232#(BW# z2%{$H9GP+keR=`;2ucNTI$eY}lkUfI$t79ZWo;sPVee#I-qopG+X}r#F1e8T&5aQ; zUueS1+`NsYaFC|KBtE%Bg}@&3aspweGnNCR3?7|)Smu~Gthzk zEX5rtUVSR1nb4gfm9lhUdimo zcK0)l%SSs%@z|G~+!|fcVHWw!1!ks5DOt#IZmOXXShzjRiTaVlbS9vPqK;8$+o!_u4JZy8yTW!#gLtlp4Gkk2p zl$GsyfYm^S{_-*D8k{y0*^twl%kJ9_h^C+;_QrAHKd6+%#5;M$T(1yT@jEKun`;X1 zfEugX&?y>4R&Yrw8P$xizDPY!!s>AmNwEm7dl4&pq3H`T1#hweVMV4v7U2a{v;=rq z?|Y=O@Vyk{-%g^)mba65I8b}LhOx`tPp5!du z+o1#g)8<@I5Z7f0^52+W&w7a>MsuTiwQ?g3Dd1C0=dH?4_K0Q$N6pz6a1~8HzQGw$oq2s` zxnz6OA}Qv|m9ZL?!qVugPm%VCJR;smtjrh{2wFJ|!=c!BxF(E#q7e#^WH^VQ^2_R4 zT_6O^oRt-!MMdku!t0T{>~ByB1gFDf)bh75gTS(MNF>r<7yU!A;N_)ZACY94{Kd5h zFK`#C^6ll?f3pTagX0X)ICDguXWxakK!w^)>T>?Yib$Oo`Kk+U%DH_P3X(w+j(z&x z_X!T51pztRJyAKf??Or-ASnyl_kC*?6#>Xo@M;oLKHhyO+6PEIAFav1oqF(I(7$(0 zIwD$me@|Q`9IE74!paPHu2p1$xsh*3{FhI`h5t?0&a9&T&zPcta@%eYeqcMoS zKiPKME)w(xOn*N%+qW(lCke)?1;#3rN4U?*jgWHV(z(CA%g9?PI>35Q53(St<6Y~C zE&{{X{{Al!hQs4fNNtYLe7?`5vq1CFY3hg-5${gVZr`up-y1<5wFCy{a?E$%7!nF# zr#V=Y?z6cDq%Sw^7uP@Qh1~&nAD2(P_Sc@_SDj$@Q#``^JHe*m?mbn?`@0KV;B|^# zRPGz-|8gE^-ek4?dAo0*`EYkTCRMD~W7i2x;jQIvf5lFNkvtm6B_V`na|ChM0P-0i zD5-EpI*)%s$PR4fYivLdy@4haSc2|uEJAJ5a_~k3 zxf0&zNP#eW2;XD-sJg5(g1D7vLi3Nn(h!(3>D?@fOxfMSQusur9@MQnNPIxSg%ocPO!r!>)Dr8w zP*pHQ=8nfk#4xk@tPSU+FjmOt3m}%-3QY5{(NE_C*|5qDU z2K1omQ=Xkdw7=y4-|beEPEh>H(Q0jZ{z3RTcILTnM)%FKYkW6R#V$QCmtXOzz2IYv z{6y5ot9@B(tsuP4teUngUNx8mJTAi0_Z7Ra(Nv%tY7kbiw=|c&pNF{F8C_#upDhXhS}AFMUJ)dg`@%PyQcf8xs-$ diff --git a/vignettes/R/RDD/trees3.png b/vignettes/R/RDD/trees3.png deleted file mode 100644 index ededa55f50ec7b977f00a93ae447b6388b585706..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25988 zcmb5W1yq$!_XY|_B&87)q#FdJq(i#9Q<3g&5CrL#knRR)kVZlY>F$sY=@z_m@b`T` z{r-2|wXO@~aLzmLJF{nI@BKXc8G;q$#8Hv)kzrt9P$eZqlwe@)QNX~!iXy^;cL@1t zN5C&H&4h&&B!z`Z6zpw`%`A;zU}&PPq6DNnUSfPnACoF{Rwx|#Oqz^XGdAVzQ+xXk 
zEtBqcdZb#`pRFYa(+=awPnfqOpNC)kgx7>4bbR=tfCP=Ov^Yvsx~Wcew#enl`S<0t zb3U|#cQ{`j2>4W*Y1hB2{?t-I3y*(5`j$kDl*asBt|6B+`NfasxkcYH9NOdKOG?HarY66iN!l1PUnPG@p}n2CwNR+wqg2X;{TcM2DM#0lEz9QI83=wvrNrP%7cG#ePQzRhyVTx9pn08LX0lZB$!jG|nE<7)A} zsT<1_dqXTJ&KHb z|9K573cc`!vaqBi_^JHb-pI(>!PLg_;oyBwaH|P36%9uXSsCuvHdc(U3~daI7+tMw zp-o_TUAe(aD&HjCo zwZlKd0s~}%e!|4e_>Ael_XbUQq3?1ln7JBRYKoXy0rP-o@Uyb8^WI(me?Iy5iGQ@z z__rnJGqyil{_)BGwp4X6vKO|o0*`d$|958oY5eDh|1{)ff{y$TOZ+zT-Mhff{K&ja z|D7{_AF$t)aX>VHB-JOL7E#>70 z9o5gv_=!LK*hb#>((j*W`l!-Jd0z+y7US`s-am z7&N3A7(|~xzkJN#mZClc5Iyw!^IaIwsTTx)j|$^iIOzZAP`1JUjDVBE^xpq;DG$>} z*Z|*M<-y-0fC+@N`n#ioZET?bGQyD|FaG{uDJlc?ua3eXeh`B}3pU$SWd3^upi|X< z+YScy|Cdo`c_?bk$5~K67Ca2?l~Jkhu%ag0VW`|)o&NbDM{hlC&}F~5dy{(02rxYPzKU3l7#!ZfrwoV~T$^Byyro|-yP z&wY#1eIwO{Ov$@kh`dz0yn+05_bc(69FHIMfqGu2svSjBbvDGZNo>LE-x6PIB$DA& zyPull^Ef{()u<4Hvm?#+L&`u8_9j+RQ4xvcJwoI=XuDTy?zSl$#=7zI@_6fP)+XN% z0UckxAcOKVDf4IBIc9%+J&!$hDG7;;NdBuJ@2jnRg(`8jn-9#dH;)Ge@~=*J5K6@( z7g9`qUK~2!UT%!;l(iC7|CHbj%dw7*QkPELNb_LK&C64gl#~qbshKHp+L`G5C6~&r z`Z`ffK10=HB3r-J+i`P^y1dYRry z4|VzwWr7Zt{jG$tMx8a`>5utjb9`>EUpMDwv$a;F!ACdei^`*UvW0Jc-JUG|y5-1e z5~G+XN+wFiCrw{qX}Me{z<>5qFnn5gI8?my0-<4(G}d|BzTOnH8LKjpA07|*`4;2i zbW(q-3s3)nrWVf$Y35HpC78tdK#>)j(yZjfbgdKQno{lx^yzws{GaV{`kXv`V6zt7 zKbT!#Suv2!eYe6#fB%ShoKd%drY`=sXIs{GYGr~L)K z*rx$GsH$}eR=om-$H&L0(d@B8u5iEtvCjJovZ>roMa;!&%^K-8tv71UNu``p?e3;) zgi<{76|WI$EG8qcu(8RX8&!yrlln3sn(he;nWd+_+#L!uz42Hrtkd)gI&x94`1PpB znhXr1TDAC`^k=eQUkSw8BXhH~IX=5N2)v9|__(9j>>Swp8XJv10Lg7*IMN9@#qOZBgO$u#^ z+m%P~o@$Wr@Vgk-3T%EIBGs$Cy2S#$W_KN@P9bY+#(A&vc{Zb7oO(HDYd7SSXy3&n$GsWVTI^rT z5#iyv;RGD`n(sbVy51-zKesX+D^NVzMCG9ngP*uQ>SM~+wrN&PqaQ${YTW_>i2-gX zaH<%6dHeq>FWfy3y_BzMKX<3A)`}*}^qGErQoZ); zb2x0h{=rih#H(Iu_x;OQjwH6roYSPPxTdD&5062VA0=-t*YLK7GW|>(J3k5!hcEgN z3V6>kgqF!Ber-NqknT@ncgSBxaTc+zU&D1l=pv_lg4fd)(G4bDyuQ?)&$24nre(UU z*LH?(XE3!K>r=!=`WwA^_Ox&*pBn;WO>Pdo z-Q7{5ORaw97N_l(+7@g46e+dlUHpd2?e8iH-S+5t-WnMh6)}mQo{`Sh+2kJ&@I*3x 
zT7tNE^nClKl2mrgpDQ;$@uJeS^W^$C{nu{Hf^(ow%Skp(4(~{$O!9Jys|1^T%tw!Yf;s6h$cKl;(Rf0tbxY*-HrSb%Y_E4S zCNHQ#G)gQ>SG&%7{Dk90u{GH#sn%?f>NSQLy@zh=Vf7j5NT}|lvOM3KHB(0bs)HAZ zVIPyOWL@Bsbzk%@6Ox=ojYUjwu9R_a47<$+lGCLm&`App+_(f1CUysb$eI-0%(@#t4Q?3rb4gb0m z5ywwUMJ48}m%ymI9l(fTe!tJMGZo3u#YwoH0^1&-+j9PxxoHLX+ZKFvkcw44&1T>j?^FkGc zN_DH<4LSEWdP{%EGq_iUQ{-!Y6t=h5<#m#l9?HT?=v%xuNMhx-lY)j~NR4x=dc4?1liZ||qUvsz3%b>pDZdz!~CB4y2R zYEiKj_Ly~b@5Ysakd*DRs}LSkOgQ70!|;}) z!qV5u2NbV)n0@6J30?Sx;Dx9>L_{_{P%|UbSiL|?PTS}rJF^e61CzBxtYc(eGm)(Z zDii-M33>U1!Nn!Q!mO<@5iH-zFBy5ycjfR=KCOf@Qny=i2rn2QN=o|w{%cyyk2BN+i<#=!H1p- z!}G_3N4|eU5FbG}q&CuE5*-}WzhMMM3e1z3CC>w#ztIiJTL5K5Fc?Gr2TCEz!JtXn z^1C8Kk`NDqixL09Dj9(=R~UR_QD5aiv#Fk>q5FT`4h}#lk!OA{bpAp+<=6m&Iag9S z!u|z0J|ix@6#4uB7u8Ob&$L^no}Qkb_WASY`0Izo9Wu=~{z%M1hSS0-4}0!Y_?#6E znr7LvDqEkB4wze9RoA#MhDA-ywhS>!!F{tA%gP}t`hD{t64(<`O#jfVcO;-Ih^#Lp z{_YAj=*n3&eKq8-u8;!ghS@HIf%VsDb|nFJ;@d+m&60r$)DnHnS#%HDiU3oP*ns@C z&_i8dgU$8;ng79YVi*9&!HLGg{yhO1p#XI0PDLU8^$fu_M1bYOGJ;tCgreZJBW9cA zUPJjb&-=vu@c`ionR;DDJi8lXIM!1%b^T}~baj{2I)2z{d9UYKamk7H+30p^+N1fU zwL0RFM7h<~Y*xVxvB=~UxGeA@m@-?wd#7jKP7Gv!W{YAg4}Gnn1}yi`$C zTu$s9nS4ZAQSJ5(bKqN@$g=6vqPgYGZ*HQ{mDf>2aHO3MGn3Cx5Zo)PQh%yh$eqyb z9{SXy=6-u?Z-0MoSlENsw|VmEa_IaQ8Tw5R$)5=sh!zV{{WTFof-k>>JuWJ|y*+O& z{-Vu{g%}Fq7baSAN@&8aTC6tAP&bD~QCCCMr78VH;@;nrj1dH5 zv%%wbWH?jrK)trQiXq>>a?nd(*8K~zR%OpCCQ)KZ*bViFqZH?TBqqK(RG zea?z^cP-rv4fkeh+MlXT77^xIno+)U{A+Du7(qluMcItMJ(7}^&c3-m$F|w9sPm!K zcCMm1`O{A2_kBUYRJ=ah+geYy$(yb+nH_ojN_cv(t5WpX59`mH%2BqVOzH!)u2pG> zWHsNQN<>(}iUKz0bi)vXIh<{Q>XO)WPxk}GYs zANK5NSd{;`VbtTl_C^?Kn@K3|$&l~~!0O7z#(B)|kQI=hV8MpLVgAW{>`IdiO_QCC zPieEtOdqZL^8B^Zg79G*(-^Rj1;HbPP|W^pDA+~iz!Bkm#EhebCP}~jLfE)> z&%QorhlBh>q06VMkf$D>8pvBukutottD+0$zQq!F1y#-9TkN>ic7ID`wQ5W)Q|mM6 zjaO5WK!ChJv=tBd5B=nh2%NjH>>RHg7kC89t6nvl>L}d5M(BuPJ2Op|Nz#re`F}nW zqFCyz5*2#kj~Sz2O3LsTf8#IMR%jCH@6$blK(3~$I()s~Dh~ioR-SC~2uOzt!Q^Fb z23C_`JcM}m{_uglm!*V92PSw(48|+yxtxR%KbVU3SrRNP&Kt{Ug%4WJy=q2Al=E&I zsg>kMW6JW1Vq);Rj>DoS04To8A|82r7iBo3fcHnhC#3W9d55@U89fyCN;7a3umSmL 
zd>*|vso2=qTQe4AxnL)z{`mAAjoa(W(Nv3!{jTpm*}xulvGtix5j8?!(i!XzyYNN< zq=llg_?Z5pc8v+r=}&gqyh8RD2d_99OG=*7J$;%31x4R&|Km$r07kpO`pk#uBbbA9 zFip1WQ=(lLakjrGzqPd${c+^^nzpmz^~K@$LWhlku@X&Ij@|dv^1YL%+v5&jmm&Wc z-xDxHhm>zUFkpNou$HPs85?(5+)u~Vx4=#fGgPg8s$KAYrmTJ)ABr|N!Dz-k>1q?H z%R*)hM2rBbwOorGeTu7jX1f>gbC?>#ocJw>Phwg2*s9iEYDEK-l?!T7sdyj3l!Uk3 zURkUZYMGgtjoaGgCi)oVTV9$U9DXGp1#Hq5xc$U#*hRcA=n6Enm|zJKugEkCTVMoH zz^++hetf+(l4~p0?qfAgtDVM8$m<$KiQjR2rUt~+*5#|*}G6zlp0)!L;jMB9#*QhUE(b(7+kt9*=2OL zs9tW-YUk$Wo?Lv797SPjT5=WxpRBS|G_Z?_0q+RQKZXnoC)lP1CQ_r1D2nPCSc$&V z8e2%|3?oIV^dKZ9@LC`Y1ldhg?8Y(>-G2-iJt*IDvX-FVi+}t=eK&j{nPZR>Li0P2 zwmCy#`#_EYBAg6xMHAHWX)+*9cWR~JP$fxLwbl$YygJ!pdvoh~nNEESAe(}@d65au z$AA1lTPRr4Oiw;RH;1IpAntNUJ&)5iMLGi(p2>9j05^NJ*z>+JQY5_hkB?D0;-+_h zX0d=Ygvgrs*-IbVJXYm>+lSDV;3WR9XHKN$xWcfgWlv$q8f0|0|J*++jOzAhWV4iP z16C{5t~20Sq|ulj37yH#5GaCy6-GSSUtEl`S3vx|V~)X&sfg&es}16+Z9@#^no}(9 zM?6pvfc)bOHc5T>MbO#5A1_&Aeor)Kz(fnab3_07=a)Q?G?P%kBp{fCet7vxW|{?- z`!A0DSvt%BF7qP%Q`je17E`cFf6~Vy(f@4IK9kTee0I7J_G~(Rkga$IFA>2xVzkjV za%YgD{O+m@;3*vmAYfZ>)bud8y`$BRXG8J-Gf^B8hA|1<%rv$p;-pwV65spR&M**o%(iJVc;7I4 zW({F1qoo40KODWF0Bl-B-23<6RiOsmtC>{FmrDf(T3I>3odNK3m4^&2?Ym_C%@lAm90u8k@y5D%m8FDw9DfPVyNy zElSEc@nQ{#{h#v(N6|)3?lNbG@DLBLaTEs^cSQf!mHLVkIq<12U%v}Ig*+m~?9=-* zCOwf{0+i*Ey%-K6AYiJU_q5h<(;-+s!ekl4BrN|dJMaw7@HTmYn^m?r!fd7|SYJQj zhy0EZ(8v3Hf}@i8AP6{cZ)6GziXpIZf8a$T+CkHOJIjacl7572IP9d3Du#J;o)=V; zHe%1fE;@bq|F(;)timUw#y;vMUyQPLe*M~~TwnfS{(XoB$WNg?pU8syL@xZFk_#Wy zVA)?RBs^0w2Q>BcpPAoSeDLh%1oJPZ$+a-K{yI{&Kjluh%gTA3fZ+kE#_ z#cNtFuIbtbDH)HRU{Py4<`zqVk(&tc128U+tea%E+up@|{Djcn#) zH10R2lUpEw9~JJVgqEdyooW7pV36Yg%p#PC(*B2uy^v@kBqE9U3@JIR=V*Ay(s)dP z)2&a%Xc5Cm2ctc-LPY)7;|Q8ULdc=sb*9P$9b}~!Tx3G@h|7LhK`2)^=znEI01r-- zXbN`(q9;tgEoXguN`mQcHJk;KmwcL0#7Fq z#3{_D-W4ID1uLw-E&q43lEA}uVRE^j(13k_PEKa%ARf_&f+#j9!3AZCnII%!lJd-|}359#VWOvdtA+nrl61IPrgGQ*Jq9SI03fFsF zMjZ(-uH1!Y4{d2diPxH7NEIb!B#-{MIQ{q9$pmQ*KAS0q)pX_kfuPfVV_GC&>)t0A z@c-JnlO%(aY;U%X`nJyQyBH_v@PNHiJ84CVBnieJcT5lz6}{h|$SUqEG=(|c`lhwX 
zn1T_F9=61S-9j2Zuyf|j*YPdJ=3;9rNsN%pZ_inbiIEFN2wjNo0D_Pr^h>hT-5j9%U*gxc#l5&3V4b0Ko{QkFX7p* zIs$qkB<$@gPanSZZ^r_WuRPCh>E9+8+_27qKbp_=q-15|SdXcRUv@VdSCEg?Sg322 zzk~$~eCT_mfSj$EkV)8+Ow59{7$H;5PO1K57ORh0%kY?SiHY{dw2k-RA3U4G)k0=d zwqZEBFHK&#`}w4u)dK^Huf!GW6KP!lVJ#Yp+e1#=f@?ipNeQL)L55A$*|Tjt^DTk! zM+dnNX3W6_DF<{G9tmX>w2LzBG2}K7uZ`v|;*4y+Hpy3dq{NAQ2d&T{@2O;uJ{7m{ zx*g?!so~>m=EhPd!MG0r0posm!Ci9t7Bk8dQwk8klqH->FX$t1Msm)APb?E=q}cfH zf&vBvfHGx` z4l+b?iXwDOBnT!2Od!QuTg7Sz#1AzY^IZOWrDo?>6-CHYlEYRzOM}&PPek7Ogd>VH zqHj%o7Z?64hX4AHY~Oii}y(<;wtK?+a2;OG;5I)00)#vAYjyZZ3`hF7=`H z9xQhRx5C{~OG|9fAY;B|#;v%X6`nAL%1O4u_B^CzQy2&90?xM^vwL754;nE-9@5jW zva;Gg%rwM6eDAYtArTT9YPQlD46SLcFtr2pw$*a|14UWpW}3P6nWOVv#_iqsaLY7JQ9InmyB47eL$?VazJ;@OICE^Jovrw(g zq8ue9W#v7nP66ahCvy&i!vGtSlkpKi6p*tT%F>pW(ESG*KFQAhEM`T0hd(EwC}AUrDXgU<*jKN|N(K>h1Q zlY^9+T6~l1HdIFd6b#=+AL8IZ!B*J!ykzT`d@H`I&1^@I;3U6AJVp`z%1Flw_S(GA0vBMkMW!7>i%q|s zca5AiF|(CAU_HUyUoW_n^XC`oJS&`r!IYt)<4{~PRz6Ca&rkQ((rZeKkH?L1x}zk8 zSz_*C?Kn%|Ja2j#A}+|n!V*hUHVAOP5-4~#tz^0ReZl9pQcMh{bJ@f_8X&D)AnO^3 z=>7w>SSj0Sid8%!*3BY*89}2i-2$fYq?(_{oPaCuD@eg7l&XQJe zOR!B0F;M*3ak%M+;)Y+hq}&@#2zz7mOi4NYd=U3WL*l`*GqtA=opigA0*q@pU=L93Z+<26fkNpKPI4W?sP2KWuNL0{@*GZ_4~+aCw#!7AV( zas~+&cp>7J@f6r`im1+q&JeaEn1f7^GfXhr@k95w`P}LQ1+M-*wPbgW_H@ZWcGvQ$77gXNKJTd~Gc?L6Q zj=9vebG$J)S>vk9X+&C{4LI@Nbd8S>6RftY8p3MSXpJwdembaVIy$Q}N&8|eJUStT;G>c? 
zA|1RKh4O}C;rKnX*I&Apbt?(b7%hd{Mj9;-(`JAKq59=#$o6CzF-Yu~3=gJ!NevDE zoUPdh)6v)1ULp^yMVo8r=qh)$oE)XNr5mSR=AG~;frRI5 zZ#LBceG>^nfYa}!{p3`uH)vvmCGHXUygMJNc8pB<>S<=lm?IQyM;N zR(bQ%#r)Z(i%Rt#p1^$rWpm}!ybcW@6Csi1Q_K!(85#Qvy)I{SbigyHXm5ULX6oi_6sEz{xF(4;{;DiHtigQRk{ndw{0Mrs)R0EdAqqVln^R*`AH;Wm7mnMA|LO3 zY>dF?unx0&^Nh@GI8l*(}n$Q5knDwywu~^;)nRw}{w7vw{Az{NyF^Mxl6d+SP08TbcmloL0TA zvz~|Q!@LipLxO4-#I{K*cg5_>g*9slNGtplGwuN(WeYY!rEpRfd!4q`_I&pT0V6Qq zd*6<=92X2%VTxvpv1?M;-p=$t31lbyEbEsK+z~#~+?!uFMQA;Mykd{{F(i3O4Eb`h zg1N4!r6mQp3Cxf8yZn5-Azs&K@A7@+@B~4ZK$b5=?|BiOywT35{M#Gnds@hG?O-i__`HmL*3x`DwYC6Gk zz|V-3dW8&7;2+&(ksURK|5SUbw7sQ)TB%L!HEL>B!8egO9!C_n4Gc{i{b`O3b)_;U z6ho6$h#AxE%|?pwWnp1w@>z@BE5e2fRmiyGr-l_z8Xi42M(&Mc{%uD~$wRZ#X3`Zj zI2FW;TKPDNASF@~XiQaOej%ZF2hvF&_rM0f`%t+C!y_mPymfF}wJxP&&ruxPa*aAo zUQtL62UT^MsI>b^%XOh_Dw_8|&_;whFf0N34j2t`=)iFGKcqw33>@IJtgb(hr54yc z0Ug&N4=t%H+a0vz7sYdG9YF8FG)rg>JzB^%9`UpQmZ^AGyz1EW{UHD zTqF_M3D$vCx_h6U7C*G!To_@C2(-a}35y{D087rAXT^)tvBHKRU2|hAE?d^*>*s)iz)5 z_Rt@xFPken>!$y4-fB2nIIg|5l!8u6$`GeD|4ZC$CPAHS?q5%b)$#BxxsLgJ5B`fJ@3d?!(N0J+! zQ!j6P=jVC!C~Nq>dW~EzUrMjBQ?#(0^B-j{MraR5&8wr$`T3|pOw+X_-SV;Hk_+rT zM$vEN?xYfq($hQ$Pp~{@)f1evyg!S^X(vSUvfLj@26_B7O&}V7Yt%@7*hF>TL%bc2 zk;`ZoHKq3i2t6B~Iga}oTR9GC_Hi7ivfT9sZcyt!`Lnn~ z)~9C$tHTUpiz$LP99V#0K^NORYWZlviL+>WUFYeQ|ePVW{Cdb5xO8rFI4vd ziw7O$PL+}q<6Xtc9Azm4=sRBU9FUW2-#oxD@K0LMW^9%xNyOSw5$+5DEUqtxk zmDjyt9vII;q2oRJ{~T{JZC#w)^ct-?SO3TS-geg8+VIHiW4$*DY2I;DBX58b?EyF0 z`bOOFW_Xu3*SVfzIfND+_R5XSyE54?kJY}yDTI@&z%;PkUMs!+DzC}A1EnwNQUMr;K~C!EAr4w zJ{35y^v5<l~-CTG1*SDrh-kCexePHcZQo2v3FX3=f$qpEVER+MoP8#aU=KGGdA`6KKoj55oCGJ5C8RtRAPw=p;h@di^%}7^g+Sd#?((RGl zP~z2hD0m~FEW3|KTo`1!W0gZh)@MzDz!Q5vuoF#E$189*Vfi9yTs|p zX50x11G_5;dOo8WeFpi#L)3{EV@#h4>=9TNKhCD*k?(uIP^C1okNM!wDfMRix%tf! 
zP1)1vvE)I_SNR5?eeJbKxX50!Vwozu=iDBD7V-88(+Z`2d9p+=QHdV!J*bG6zeeFm zo*ZhB{?o#DAR8O?y@x=<#ihAdv1~UyF)MLjBz|53&6x%TEts&xv>ffXyXg1iWvQ$^ zMYT2&k;K-fj=oxbBbCNu__&mVhqO3Cb*O|dcZ7?K5t(2@L~3PQTMj@c7)P)N%D`iKg_(%5D@^P$=bB1pp_d-2A90 zD&#~b_V&O{GN1%Oal4)RE)@C*z%C599If_rW|IRZ*{%#z6V$#-b?Re)5PdZriJ4>x z1CZ->L7AC(r${(f*YBP?HtRZzu?VDz5w6SRfY4gG#LLn?nV5c?Ehg%UvwGT8g|h60 zEkK?cKd|%S482gis&OK3Av{bo<%c%r2LmM zhk2!VIX93el462v0V#o}qF^p8Hd+2vD=6=nfm*%eTAX^rw8mk(M00j&dUvIGa@_ci zpRj=wN+L;mfxc~VPcxtg9BqGK37}B1SxBS`xYA{iw?B3z&|4stwiAmLN}`&`x~HD! z&LD)I!%;?gxjAXCML}qKVX`II;T>Lj?#vjq8`5$C2g?eT^<*>AageXBzuN&ICcCWl zR!fgQCO$q7-2lhoai$AS*J+WI;dC?3=WsS+Xqwk%3yBk@bi6nnRP9?EjuFfzlM2`p zh`-<>|GCT2qxgziSbAw%cTzeKzh%BEf-)78ftv&sx|oB6d|U?cSqu@`EvKU58Fi(| zILWih-=&;Uzid{|J2OE;B@`r~Voo#IrszRGvVIAR<_P?N#V8rhjA|Cv;-6BrmZ+h2GLYqTm$+q-MpSaEl>f*jID^8Y!On zbhiO(5TR-PJGR;CYd!17?9q8f;W2V*zW{+0cX>fcJ5nC_FkjDY`>h3(JGVvl6*LLu z6nz7@3^B~MScYtgtS92w3Ot23Lwt23HsJ)yb|cfNrBdI&n>pkU;IJMkCC1q+xjSs( zoCdnYev+5cU!Z$3?^16jj;(pDoRnR)T*mBr(Nt!Q$`;kefV2l>!0Uoq`l26qNL1;^IU>aVHDt?rc44PcbvP`1&Z7iz}SJX3HIbw7lL^Qy#xIYADgeuMd!hmR;aKsjlu4wW%vj8)RyikVYWa$TAa2sY(|m2XQzVOtSU0^^ z13EEK4GaNMmYV)YY?Y`;b;1Y>sCf*-rkg+$Rq4A#l^4r?L(c8}nd~og)wtN66oVZK zrN+eXn<=e2-@o>k1W9ZXGU$rL^J#@CM7lxmUV#@(ApdoHs)GDbLgLjc((yv&K&bWs zs*wZ=&m<0;w4a5M{*UTKDifF#qm9nTySC14B~lh3DkpfSDAv_;&|toXSj4HiDH@y_ z!+)6`z^k;P8sy6%wquoH%8pA}xyw!$2-kimrxc3*#Gq9j1Ss?Md>S5#MeplU{U$&R5odKqEDL^jUS3+!u5`Y5T(J;PINIDMv?2e!B<0*K0Hl-xgEn1D_An_f4 zVV`I`@DY zF;}z7_>y*}!N~$zrU|Wk?Jse5w8m7q+tR>!LxdJQs_heU3o^Rjfh=D@v1`k~?Q020 z1aKD18E>p_TDOLziH9@s61q1+*dB8UPMK+#yBB{Lk5L3qlgz;ARe(OtHl#D$NEHPX z#B%`E130Ionb+D~!G=uw2V{LeibF^DD*`&V7~0ma_PxMDns(`I_OdZrethW-;9r6B zHLl~~?fo&wF_KrQn90fL+t8U8>_j|x6?9jNZ7nGQ;_9HHqaZj~Y_JTNfh;Qw4OU&0 z@`igK`?Obwk&|rO5aUk!R7+KE+>ejXZjJ*ot`Dxg@mkY0wq&W1{;^A^2w7L$k0an@L<*1k#q0xN#e4Zbl zI-V*jG9hw8RWjm9^N*#Dtu^}ho5WP7pGVG>;Fob=^N;M9&7FngRh4>(~-8yRrG zRtk1KeJI+rU|R)T)-`fl#fQvKns6(l+oN3w3;EFUK}FZ1!tw0K48VUI2m87IR<)3c 
z_}~?`f@<$kYsKrYN^){&PofYj#41Q$vH|!mhArwAPodDNkGJyXAAfm92GrQ;o=1|G zf*th^8&Dz1lT%vtGcsf=L6R%DXGZi_-xRZ7-W6p&-`#ePi+ z>YjKRie2^>Y{kB@qnabY>cZJR%VM-V8=wFReNj38f7d# zSL+xf{1+4_5%Kn9eMAgm1z&lgD{)FZhJZ=w4M>WD@O*!rmc7Z%6b{6H&OjeyzeD~w zRp*Yn8?N}sSp&{6d{t9?KUSoFaFm^I2;amg!tjW}C3^K8+bvtV6kW50bO>ebiL}4I@Bc`Ypn}vZPjfDO!0GI6gLo%m@qVui%^t^#oo>5Sh z4Hg#)Llf(>s`iYglfNTfl+JK!bY!Nn#_iE!8f}k>W`?{Tlv;WD{fsQ*VvKE5E(IjO zJGM3R-j7%UCjL8y2y3n4bV7wh|EREqD58>+rGbJf6BM?G!J=9f-JS*|8apu|9Mm2# zgAb^DFtPl6XEHg9o9!+de#EJ0x_&{ivXrvg);>s7Pf42N?Hx8zCEcSdi6ulLn%^9y ziNO8EKW*dhmv2PKsVvI%mELqdSW3&WK&R!rX!Q+8@l~!u>z9csY#~`2_QcXh)Rntx zt4N7ql;DU-qzUJoE1MzE`5%!veN4qGKHnavr`r)~kJcupwOGAV+Sdh~mXhBGI{w!v z5?Mk%bvvg{VylKDNV`8v@}4=k&{_=$?1b5B5zT9mhFd@7)R3$1Qazbowof_|M$)~K zB|W{!@-dd&;*!~pl4xWqmfmf<2*5EbQj1?5bA^9mz9xBZ@@}8u!x}|%M+VS@jwLYZ zGe!o8Tt~GJQE*VPUtzrF%)+rla{X0=rH(FP%?kM#B1Ok%8+$hBs;*kqyQ0LMTE(}i z*225O{QV7DfDNe&U@SPQWipDByM~<@R?I3_eF{pEF69ReuSNq5=wgwJv9&`Da*W8+ zjVa%Y#z$Thy}Oem1BRSQDIJ_v-J7Zq*2`%DXT}S{0ub*bwZHvO=n_?^pg4c_aPM38 zTT}t1+ez(0(`!9*7JC@>hV64l&uRB8mUMQ4k>H(vo7PIP@gm6XNZ){{^B_0-dh2b9 zYr}1lik8=|<&9cmo@(}z1aL4g5M0WoMv0ruK%%T~-(*6iP%40%%K{EWl8q)CjS14E zt#&AnP+&W1&0T94zUw(gJe7*C>&2X_I~k}YOC(v*Gd{cR&2miM+ufh-YG%zG81muK z%-9wLALC^mLoN$Z*G21dB)0Q}s?qXT*V`xW)0W({zq3Dy#>XUuR&hFz4yx%FP($xA zfpcH-31xA=IDbITOv@|1S&bIAjcI_ur-NR?1;T|3vcl?BMFZB5P9R=>&qN-PTUUy< zx>91Ma63WR(R>`YcX0ZJ_=2XSto0L-Ld{g#Tn_|u=UEJ<4dI@A&!g_n)kId>*L}|T zH$QmnPb}UvMSK6Shdqq}AdF^ikvV!Hb^vpkR{e>qtb|Uf){!QeJ>2cx2_y`}vWzbW4_(83gFEgqpjS{+zhTZzydspbsdlgE4_M^sUGmijIB{ITg-7%*JT*0nq*+; z->DzB{#FeH)Lx~ZWsw3Rkp?jn>zY5wecV6uL~~%d7(6C zh|P$gGyk@pD>CF07v6M58&tBF1<;HjFEuARYd^%cj?)~qnH%5-Hi`#hgnv=Atc<9nZ4>ifs zKzW7rKlnXT5NH_+q-H{N8LF-4wHWa4ly*y^KnJbIx$Ds}&~YXACaaP>zn~~5?BeAM zG`_m#b&io2~kAFPClNEXZ|IUKdwc{a7M_&*%rnQf0h-+CG)kETA9AWpv;9a$1k zG1lz;)JZ0|bhu(^jo4FTS;Q>Izhn1fjcUH`W$zw+vF`qz++uGgEkm6hwtCPUmtD8U zaykZ$h<7tCY!IExaWiTy-<(#6=O61xHFj)hLgI-@m+!@ZzotPgB`KK+WX$g|ZM0pj z+V9N{3|)^byUr`UzIWSJqfE{PGHaE}{UB}1welDI8`&Kw=byfhGrCf&g%#so{*YEp 
zilt$aQ$H&Io{{4Ck&_s2EHtxa#PrKMR0Ff5>UHvFxlI$hBWukjSFh2G1}Rkl@Ilrq z3r98_Xj(M&DwzLr6}w?2guWmnLolrYD)4dNt@!G1C-%gc5h|J>$G9r%pecF={+<9} zaNjpQirC1M`|!-STGH;@8ieg4zyZh z5t;Xdx6K$Pj<#ao54aW2Lrc9&D4{*7TLNDb@~!`$R?a+{sy1xnNEz~)qt`r3h@yU)OI#X0u`#Ph)chu`gCU=+;lx?&?YQ>M_tuJQF4vT~IsWY^)QJ`K};& ztAkyS*KKU(tJA+-+dFOa;yE-*aBt`8h&3jhD>;u%4)U4Q*4O_URbv;D@)i1Pek_1-^`@sqDrExm~Ls9#^rG?^i6(=NRP=$$Cy0EYIN-S z7DF#GK6YQBOSyhaci8A%Ge=&s{-8N#qt!D1CABXNqOPw8`%2FBHZMxq75NN%73js@ zx7S)!`QhetBIS7-c3!XNYRAig3TifKF0+*Qc;0}ecTd95>37|^a?0gh8&(fYc#ol; z_dVOQO^T5u5l&*Zzq>&)@*8~;bNi~2j-Y=*7H z9`=st0+Ec{&4iLEPd{)w;+}xK-;oIzj6n$A7uhr^O!zLA;wZj=S;y#fSjg}y*PfYM zk5pvw-U_h}1;z6%S-Mv`QAJUakxu=>A3S!r8;dfldVL)zO5>@goOxtupVSTJJExk- z@j9ivOEF}NT$gp2WqI3ihTmXg>N*$+KUY0odm*Rq_CeH@CfTIj!DZ#NtN)mxy9t^O zUEo_`!jI}2`#${Ajv%3MhH2klC%`Jx61We6G9hwCJ<3o5EFI|%#+kESNd zpQ6SDqMBSLWMy1h@3L2UEuLeMM?Y?09fAm-26)Aj2P^^P(z5&A)9d{^N++tNhBf`v z(>Ih)zJ70LRoEWa7K< zs9>8z$jv~0HDvov0hd3&4FC4@=%@K}9Z#QbzpL*vH&WLM+rb>b+!(**estH3lTdk+ z##D4G>89nA+js4&vTf>p@@ZHc4Bu?*YRtzkB|Z@vXgR@f=~3Ct%BK{8*SzxS=NNnG z93=}>U*MM;?FSXzLr+@YT;m0=7T^ zibUff9#VgKbrLhjK4o;!I;uS={UEjcM2d#=NV~1W%fb~%^7bQq8RG)|MCO{NucOYM zouV~|P31d4xOgl>+__YF5)d`-xHnf`(o6_^;GO}4SzrC~a~-Mi>%npgBexHE+pbKk zE+F2RSnxU+c!|j;gEz(~T_Nv|I-_PcDW^!=#dOa~83Xn)x4O8e+HM_^6>ASS#J=PO zunp!%5T;w{eXnXnY>%%sfl@br+r@!K%VeswSvQtH!2o*fY8us5d-a6VQ^hnda&TCZ zI%UfDH=k*EAF%7sa@;P&Z&DosuopYl!Vkn;J2Fu4%vND8c%MP0tlxL>3G{R{u5oVt z5s;?Z-TXAUIkm5zBX8*v!Bhvs!%mTaO!r zMMVkt#qz-_M`Itq#|I@?ikEZNaJ+(#b^={+IctL@-3cRSSuTak)xYMu|JfpQQ=dVO zTSi1CZu&(~L4=#MK=hrJwmYsLCZ6ERwKgmc+8vOvsa+q3t~iI)>9ueqb9G~sXn2U% zR+TenNC8tJ&)L7o3R^zY>2*GPJmZ|{kOQ332n819kvMNG@7q^wCRWShEh~LBU4dWl z)asd&E&`FcwN6H}LnXF>A$v1L{02L6wNsX_iSYG7LzkFXnX&iq6!#}H8-G);@-;=K zJD!(JnbI>YHydxX3fuP~<`3`{Xbj}6DdJcU*?i0OJ8H73$)Ypx^kyR^wbrw=im(QS zTd_Mg7vd>vKPxteBc_}KvaY!XnF{Me7hGdc_;+|1Nq@5fjt;NA(X+XAAqB&G!OH^2 zc*Yc?ACTkkBdI@s(U!l8*2g6(FG$unS3#WbgP`zi=>a5~DAu%n!v6AQ(*9O4>(UAw zV-ZM*y?@Lxchu`u-@o1{P>_Ar!qfU0CI&{`qc*p(vZa% 
zLTpAN)5eVhC$D7ml=`Cb$3N+=S5r!$@(t`}=pz)L`G>o1&3LhH-_k`zT~KKL)~CUl zcb8qRqu#*W9-r_#$YobTseBhC5t@Rpi2CKfY%k|Wxi4mKiZW!%y-<0}Y>m!t=sd?| zKRD&jDlB*CN2~vdV2m&ZtuvnB;^4!Km4;%hll$oS)8xAWl}YIOiJ5~Z3Rin4TjZ=^ z!*Jp=dwD`#?rM9~Xuz&%w%-Tdm;^p8iPz|`iuB;cIv*P2nHBr|mJ5Ud-P*pCPtNRD zMs+h=+W1+5uH$K|zqUK8fj4?7EE_tERgMD!O=v;&LN|t>V1#whH>~`oDn%h8R+yH! zQ;si)beLs%?EE(Z?}M9|s~I+5eH*(An0#n*kvgaCR{7;Q-fGw6tf+1RB^}qT;Rhlz zCi*Hp5j4-d%Ecsz2jkRWE<6SB77;0@1_Y10%R~fPY*9J<7GXT{$c;hCXU`v0e*5qG zl}fAddot2YDYD(Ac`czvn*AsS#$C(I!99P;15F(G-?}~{eWsD>DbRF z>pnw%(y>3}N>?hsYaXkwW805NWHTw7lyYVE)?o~Lc=_fuYr)x}@`Jxk5ISn8jh58> zr)~4ck0tA5bUKAbcMZx}`U4omaw7IUp47=Hv%xtjNo8MY`J)Oz!gP&{-VyGC*$n&i zB=^Uu?B}`u^+Tkh0qXGkn_o#yOH(J^e@~+`JrO>r9JqUg2i}a*QABCbCy|RA&7q0l zvq}&pA#Ro7gDp&Kf&FSYc^D|C38(Cc>~+~iZ#8o9{^d~G3&D*<22ST+6B?Q>4KNTH zQ*5wuz2+WAA8im3Sg4W(4g;551_Vz)Gwjml>W_$7*Z2_2*^oot!Vlf*EmQL$hU+CN zBx_^>KMXDVE+~)frK`PBLrO46_SFX1dL8#!qd%X+b}o ztfc$PeYH-j$e9zyf*XkdEmO{%q3&;66Y@)-En=#=A|C$lNGTDCHxFXGKKvzx;Aw zFkPtw!vH`1Mi#fktoM&i{hOawA%dsTM65i~3N7X+z09XyyKmEh5sP_LE#g1pj{Lu% zV_GN#Ts5c8nV7WAAD5EgyQVDRQB?XP4X}IV_xNDuW**fHMGU6a@qwwTb$#0q*TOdo zxLnowe_pPE`1eJGurEIE`Y(z6!U&-arzVE%G4%dOewsS2EM?HKDH&CNYU2=*L#vQ2 z?$dR3(Vym@h~y|Nw?aiF#-DDPKt)f``;o@t|McL2r9JR{<>Ss+!Ui)W3!!q)g7WVy za3beRYbJ9-LP!zMyVvKDzRE~ZtGWbgP6ge>#6(Ho^{e=vfE`|&8mG6B;Fq{;UiS#DcME?TE9IBPe8dE;Q~up zgxcFJU`!OG%lY&JY*GEv>pR)d8tH|%nKFPDL5HdnvJ`1^Jw(z^EJRMtCypAGG*eOv zD#3s`*b-W!?uiKr&mMBFmx0J@k<`ZO+JMDl!#FQ*zK-Esu3M^GwCv{;7zS^G70CY8 zCF591fucz$Y&}kIeST6_RCIK>FIU3~S|=U^;JtaK%VYZAK6AS?3{LOE$5>`ymlJ-1 zIJRjxXGiq6;g+0*M(CIf12Isfco!^4_4seg%N7mj3LW3}**NR0x`)X{*c_#sQMAz2 zE=GqMB}@+(oXGAhG!rAVso&n-F6r@67f~n1D*+5}7eSojfxR;B_6#yc9M0h~m}@jseS z)&T@BJm4m}m{>O!xozoM$?a5Q72Vt_>YqYATx(}%XGuoQcD+@I*(QKc?FDhRLMOE&FO@nLZj+A;z4%6TPP^x& z=Sfcchp$hyZG6g&Ni7R>xbQ$8Q>l|$IV)(argnP~uAa}AiFz}Rh^Co19ZRu>m0LEl zs^<|4&N|xd)*Kd-tbHC-I<0}Lg^<z;KWB=yzKW$1V<3}!d&Y?R|ZbwdhtyPpS1J@}DWz=h$*)2^Xj(T9wNX&O5|f|ywn zt+mdDX(KP|OK}+)`u!%hsp1z&PUG5m`3T!jL#Sz$S>f$6D*f)%Tk!7}y1swzLI6yL 
zriISb^O}YIyU$RdH;lzjVP|12bWC|Kji)@vF9>gf#yh~*crvzt!rV03aE3BpDw(%Y zCvPnjhS%ShtP>`1#twbc`jK>3-xhvxKJ16a@*6O-*E!hsJ zthjB0ghO{P8K)mtQ_2F*N{;x$T2UkRHyXgg<5DZbBPTbuOmk$J=M3$51=*_sJ=9IL3c z*$XUCD{~RB3}%7?0l?T&kYxLni*i2>qs`tHoxP^NsPdgme(_BcT?s9ECKrMLe0nnR zTW&WU^WKbnNM}h@2p*2zo)rsli+Z!DxdY_McuDpN_OOGC4B&Vgs)@zY;~pHaraJaT zmw)$O@(^oV)G)HGc75c4zJ1_O+Zj8I?>pcy3U;<1%)-#Ix)P^>&jXOY8$}Vjg}5oH z41tz|#cC5AhQ*VtQZ5ObgXK2wqk_$)`p0cBkrRIelg-Y1!PD~8Dv#=p(#!6)bf3w2 z!BIG)aOOAK^@ZP0tB^uPu}Qk_SKgJ3FCn$qO*)E`Ev3OS-^f*uK07EyhotIfklfe={DKgZ{&~ZG;;;@5P2T zxf{?6^nx}6O2 zNLC2M2p#<%URrto2O8sDekrX4qFs0jrHC*a^n83$Jb1-@<*wzn-BJ)42iFd&86&Q3 zcXMpuD6zU`Ra~XH>?|2i@hh7V#cuL54w-L{-Yi~LmOEJs;94CZLvz34z8Iz>%DyO| z-J2sNMrB6g>hordL&u@|h*7>DPJlE66&*I7Sk03`QlYX9;G)`eI!BFT7++>FLsjId z%S2rI-dW9Qotu(kO~H9V1${ZaKw0=Az#4GmH7VWXoiDn>=CHLvcv9=#jzyb?9)mN< z)_zn(>ROD>k*g6D-gYDHj6#@L>D^~@^C!12#SROG)1n>3cltVAwI8AcBbcplRgwjy zLn&dCn1mTXDpzd_?Efr|lR1ItE#s??zkDbC23t4TSC@MkNUv!Dz3KjySuelwn3ZE` z{TD-$%x)Q=IKfgD&2id-Lk2r+ip!xM&x9JX@Aj}&G~WB?pnE_7qM%@=mkU%BO=e`v5;P$tR-r(G5y)+yw|glGf0r?BPTS6@hVUdLLTzZos=RMFkDGu zl^Ws^^HN-OcJW10uuZAMYk`l$d-p>y%_j&jCw4;3mQ(&59~l=HM^$`q|DU5mU`sdN zF^q;7Afs{!r)Sgr0LFrZ)KqP#M)H7Qmnz{v?GI$ID)6$iNL;zIFGA7&$2^uIbQd7n z>2n9FebKLwT$fB!w6^9*2U0d8C#Qk@2?QIqIk^&P$hEiL0>2>iq;#!p!^6mWBc?I` zWxc&XxJ1}h6_=k^N{~358O1)1%AYoVFi)_Wwb0x5Sbtiyid= tau_test) +print(f"Coverage: {cover.mean():.3f}") ``` :::: @@ -292,7 +388,15 @@ bcf_model_root <- bcf( ## Python ```{python} -# Python implementation coming soon +bcf_model_root = BCFModel() +bcf_model_root.sample( + X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, + X_test=X_test, Z_test=Z_test, propensity_test=pi_test, + num_gfr=0, num_burnin=2000, num_mcmc=100, + general_params={"num_threads": 1, "keep_every": 5}, + prognostic_forest_params={"sample_sigma2_leaf": False}, + treatment_effect_forest_params={"sample_sigma2_leaf": False}, +) ``` :::: @@ -338,7 +442,24 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python 
```{python} -# Python implementation coming soon +mu_pred = bcf_model_root.mu_hat_test.mean(axis=1) +lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) +plt.scatter(mu_pred, mu_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") +plt.show() + +tau_pred = bcf_model_root.tau_hat_test.mean(axis=1) +lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) +plt.scatter(tau_pred, tau_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") +plt.show() + +plt.plot(bcf_model_root.global_var_samples) +plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) +plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") +plt.show() ``` :::: @@ -360,7 +481,10 @@ mean(cover) ## Python ```{python} -# Python implementation coming soon +test_lb = np.quantile(bcf_model_root.tau_hat_test, 0.025, axis=1) +test_ub = np.quantile(bcf_model_root.tau_hat_test, 0.975, axis=1) +cover = (test_lb <= tau_test) & (test_ub >= tau_test) +print(f"Coverage: {cover.mean():.3f}") ``` :::: @@ -437,7 +561,38 @@ tau_train <- tau_x[train_inds] ## Python ```{python} -# Python implementation coming soon +n = 500 +snr = 3 +x1 = rng.normal(size=n) +x2 = rng.normal(size=n) +x3 = rng.normal(size=n) +x4 = rng.binomial(1, 0.5, n).astype(float) +x5 = rng.choice([1, 2, 3], size=n).astype(float) +X = np.column_stack([x1, x2, x3, x4, x5]) +mu_x = mu2(X) # mu2 for Demo 2 +tau_x = tau2(X) +pi_x = (0.8 * norm.cdf((3 * mu_x / np.std(mu_x)) - 0.5 * X[:, 0]) + + 0.05 + rng.uniform(size=n) / 10) +Z = rng.binomial(1, pi_x, n).astype(float) +E_XZ = mu_x + Z * tau_x +y = E_XZ + rng.normal(size=n) * (np.std(E_XZ) / snr) + +X_df = pd.DataFrame({"x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5}) 
+X_df["x4"] = pd.Categorical(X_df["x4"].astype(int), categories=[0, 1], ordered=True) +X_df["x5"] = pd.Categorical(X_df["x5"].astype(int), categories=[1, 2, 3], ordered=True) + +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +n_train = n - n_test +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test = X_df.iloc[test_inds] +X_train = X_df.iloc[train_inds] +pi_test, pi_train = pi_x[test_inds], pi_x[train_inds] +Z_test, Z_train = Z[test_inds], Z[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +mu_test, mu_train = mu_x[test_inds], mu_x[train_inds] +tau_test, tau_train = tau_x[test_inds], tau_x[train_inds] ``` :::: @@ -477,7 +632,15 @@ bcf_model_warmstart <- bcf( ## Python ```{python} -# Python implementation coming soon +bcf_model_warmstart = BCFModel() +bcf_model_warmstart.sample( + X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, + X_test=X_test, Z_test=Z_test, propensity_test=pi_test, + num_gfr=10, num_burnin=0, num_mcmc=100, + general_params={"num_threads": 1, "keep_every": 5}, + prognostic_forest_params={"sample_sigma2_leaf": False}, + treatment_effect_forest_params={"sample_sigma2_leaf": False}, +) ``` :::: @@ -521,7 +684,26 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python ```{python} -# Python implementation coming soon +sigma_observed = np.var(y - E_XZ) + +mu_pred = bcf_model_warmstart.mu_hat_test.mean(axis=1) +lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) +plt.scatter(mu_pred, mu_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") +plt.show() + +tau_pred = bcf_model_warmstart.tau_hat_test.mean(axis=1) +lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) +plt.scatter(tau_pred, tau_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", 
linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") +plt.show() + +plt.plot(bcf_model_warmstart.global_var_samples) +plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) +plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") +plt.show() ``` :::: @@ -543,7 +725,10 @@ mean(cover) ## Python ```{python} -# Python implementation coming soon +test_lb = np.quantile(bcf_model_warmstart.tau_hat_test, 0.025, axis=1) +test_ub = np.quantile(bcf_model_warmstart.tau_hat_test, 0.975, axis=1) +cover = (test_lb <= tau_test) & (test_ub >= tau_test) +print(f"Coverage: {cover.mean():.3f}") ``` :::: @@ -581,7 +766,15 @@ bcf_model_root <- bcf( ## Python ```{python} -# Python implementation coming soon +bcf_model_root = BCFModel() +bcf_model_root.sample( + X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, + X_test=X_test, Z_test=Z_test, propensity_test=pi_test, + num_gfr=0, num_burnin=2000, num_mcmc=100, + general_params={"num_threads": 1, "keep_every": 5}, + prognostic_forest_params={"sample_sigma2_leaf": False}, + treatment_effect_forest_params={"sample_sigma2_leaf": False}, +) ``` :::: @@ -625,7 +818,24 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python ```{python} -# Python implementation coming soon +mu_pred = bcf_model_root.mu_hat_test.mean(axis=1) +lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) +plt.scatter(mu_pred, mu_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") +plt.show() + +tau_pred = bcf_model_root.tau_hat_test.mean(axis=1) +lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) +plt.scatter(tau_pred, tau_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); 
plt.ylabel("Actual"); plt.title("Treatment effect") +plt.show() + +plt.plot(bcf_model_root.global_var_samples) +plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) +plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") +plt.show() ``` :::: @@ -647,7 +857,10 @@ mean(cover) ## Python ```{python} -# Python implementation coming soon +test_lb = np.quantile(bcf_model_root.tau_hat_test, 0.025, axis=1) +test_ub = np.quantile(bcf_model_root.tau_hat_test, 0.975, axis=1) +cover = (test_lb <= tau_test) & (test_ub >= tau_test) +print(f"Coverage: {cover.mean():.3f}") ``` :::: diff --git a/vignettes/custom-sampling.qmd b/vignettes/custom-sampling.qmd index 47f654387..046d5bbf7 100644 --- a/vignettes/custom-sampling.qmd +++ b/vignettes/custom-sampling.qmd @@ -1,8 +1,21 @@ --- title: "Custom Sampling Routine" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + While the functions `bart()` and `bcf()` provide simple and performant interfaces for supervised learning / causal inference, `stochtree` also offers access to many of the "low-level" data structures that are typically implemented in C++. 
This low-level @@ -46,12 +59,18 @@ library(stochtree) ## Python ```{python} -# Python implementation coming soon +import numpy as np +import matplotlib.pyplot as plt +from stochtree import ( + RNG, Dataset, Forest, ForestContainer, ForestSampler, + GlobalVarianceModel, LeafVarianceModel, Residual, + ForestModelConfig, GlobalModelConfig, +) ``` :::: -# Demo 1: Supervised Learning +# Supervised Learning Demo ## Simulation @@ -85,7 +104,27 @@ resid <- (y-y_bar)/y_std ## Python ```{python} -# Python implementation coming soon +random_seed = 1234 +rng = np.random.default_rng(random_seed) + +n = 500 +p_X = 10 +p_W = 1 +X = rng.uniform(size=(n, p_X)) +W = rng.uniform(size=(n, p_W)) +# R uses X[,1] (1-indexed) = Python X[:,0] +f_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * (-3 * W[:, 0]) + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (-1 * W[:, 0]) + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * ( 1 * W[:, 0]) + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * ( 3 * W[:, 0]) +) +y = f_XW + rng.standard_normal(n) + +# Standardize outcome +y_bar = np.mean(y) +y_std = np.std(y) +resid = (y - y_bar) / y_std ``` :::: @@ -121,7 +160,24 @@ var_weights <- rep(1/p_X, p_X) ## Python ```{python} -# Python implementation coming soon +alpha = 0.9 +beta = 1.25 +min_samples_leaf = 1 +max_depth = 10 +num_trees = 100 +cutpoint_grid_size = 100 +global_variance_init = 1.0 +tau_init = 1.0 / num_trees +leaf_prior_scale = np.array([[tau_init]], order="C") +a_global = 4.0 +b_global = 2.0 +a_leaf = 2.0 +b_leaf = 0.5 +leaf_regression = True +feature_types = np.zeros(p_X, dtype=int) # 0 = numeric +var_weights = np.full(p_X, 1.0 / p_X) +leaf_model = 1 if leaf_regression else 0 # 1 = linear leaf (with basis) +leaf_dimension = p_W if leaf_regression else 1 ``` :::: @@ -178,7 +234,45 @@ active_forest$adjust_residual(forest_dataset, outcome, forest_model, ifelse(outc ## Python ```{python} -# Python implementation coming soon +# Dataset: covariates + basis +dataset = Dataset() +dataset.add_covariates(X) 
+dataset.add_basis(W) + +# Residual +residual = Residual(resid) + +# Random number generator +cpp_rng = RNG(random_seed) + +# Forest container (stores samples) and active forest (updated each iteration) +forest_container = ForestContainer(num_trees, leaf_dimension, False, False) +active_forest = Forest(num_trees, leaf_dimension, False, False) + +# Sampler config +global_model_config = GlobalModelConfig(global_error_variance=global_variance_init) +forest_model_config = ForestModelConfig( + num_trees=num_trees, + num_features=p_X, + num_observations=n, + feature_types=feature_types, + variable_weights=var_weights, + leaf_dimension=leaf_dimension, + alpha=alpha, + beta=beta, + min_samples_leaf=min_samples_leaf, + max_depth=max_depth, + leaf_model_type=leaf_model, + leaf_model_scale=leaf_prior_scale, + cutpoint_grid_size=cutpoint_grid_size, +) +forest_sampler = ForestSampler(dataset, global_model_config, forest_model_config) +global_var_model = GlobalVarianceModel() +leaf_var_model = LeafVarianceModel() + +# Initialize leaves of each tree +forest_init_val = np.zeros(leaf_dimension) +forest_sampler.prepare_for_sampler(dataset, residual, active_forest, leaf_model, forest_init_val) ``` :::: @@ -200,7 +294,11 @@ leaf_scale_samples <- c(tau_init, rep(0, num_samples)) ## Python ```{python} -# Python implementation coming soon +num_warmstart = 10 +num_mcmc = 100 +num_samples = num_warmstart + num_mcmc +global_var_samples = np.concatenate([[global_variance_init], np.zeros(num_samples)]) +leaf_scale_samples = np.concatenate([[tau_init], np.zeros(num_samples)]) ``` :::: @@ -238,7 +336,16 @@ for (i in 1:num_warmstart) { ## Python ```{python} -# Python implementation coming soon +for i in range(num_warmstart): + forest_sampler.sample_one_iteration( + forest_container, active_forest, dataset, residual, cpp_rng, + global_model_config, forest_model_config, True, True, 1, # keep_forest=True, gfr=True, num_threads=1 + ) + current_sigma2 = global_var_model.sample_one_iteration(residual, 
cpp_rng, a_global, b_global) + global_var_samples[i + 1] = current_sigma2 + leaf_scale_samples[i + 1] = leaf_var_model.sample_one_iteration(active_forest, cpp_rng, a_leaf, b_leaf) + leaf_prior_scale[0, 0] = leaf_scale_samples[i + 1] + forest_model_config.update_leaf_model_scale(leaf_prior_scale) ``` :::: @@ -277,7 +384,16 @@ for (i in (num_warmstart+1):num_samples) { ## Python ```{python} -# Python implementation coming soon +for i in range(num_warmstart, num_samples): + forest_sampler.sample_one_iteration( + forest_container, active_forest, dataset, residual, cpp_rng, + global_model_config, forest_model_config, True, False, 1, # keep_forest=True, gfr=False, num_threads=1 + ) + current_sigma2 = global_var_model.sample_one_iteration(residual, cpp_rng, a_global, b_global) + global_var_samples[i + 1] = current_sigma2 + leaf_scale_samples[i + 1] = leaf_var_model.sample_one_iteration(active_forest, cpp_rng, a_leaf, b_leaf) + leaf_prior_scale[0, 0] = leaf_scale_samples[i + 1] + forest_model_config.update_leaf_model_scale(leaf_prior_scale) ``` :::: @@ -299,7 +415,11 @@ sigma_samples <- sqrt(global_var_samples)*y_std ## Python ```{python} -# Python implementation coming soon +# Forest predictions: shape (n, num_samples); rescale to original y scale +preds = forest_container.predict(dataset) * y_std + y_bar + +# Global error variance (sigma, not sigma^2) +sigma_samples = np.sqrt(global_var_samples) * y_std ``` :::: @@ -322,7 +442,20 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +gfr_preds = preds[:, :num_warmstart] +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) +ax1.plot(sigma_samples[1:num_warmstart + 1]) +ax1.set_ylabel("sigma") +ax1.set_title("GFR: Global Error Scale") +ax2.scatter(gfr_preds.mean(axis=1), y, s=10, alpha=0.5) +lo = min(gfr_preds.mean(axis=1).min(), y.min()) +hi = max(gfr_preds.mean(axis=1).max(), y.max()) +ax2.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) 
+ax2.set_xlabel("pred") +ax2.set_ylabel("actual") +ax2.set_title("GFR: Predicted vs Actual") +plt.tight_layout() +plt.show() ``` :::: @@ -343,7 +476,20 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +mcmc_preds = preds[:, num_warmstart:num_samples] +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) +ax1.plot(sigma_samples[num_warmstart + 1:num_samples + 1]) +ax1.set_ylabel("sigma") +ax1.set_title("MCMC: Global Error Scale") +ax2.scatter(mcmc_preds.mean(axis=1), y, s=10, alpha=0.5) +lo = min(mcmc_preds.mean(axis=1).min(), y.min()) +hi = max(mcmc_preds.mean(axis=1).max(), y.max()) +ax2.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +ax2.set_xlabel("pred") +ax2.set_ylabel("actual") +ax2.set_title("MCMC: Predicted vs Actual") +plt.tight_layout() +plt.show() ``` :::: diff --git a/vignettes/ensemble-kernel.qmd b/vignettes/ensemble-kernel.qmd index 4e087800e..7c45aa8a3 100644 --- a/vignettes/ensemble-kernel.qmd +++ b/vignettes/ensemble-kernel.qmd @@ -1,8 +1,21 @@ --- title: "Ensemble Kernel" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + A trained tree ensemble with strong out-of-sample performance admits a natural motivation for the "distance" between two samples: shared leaf membership. This vignette demonstrates how to extract a kernel matrix from a fitted `stochtree` @@ -33,6 +46,12 @@ demonstrate below. 
# Setup +Load necessary packages + +::::{.panel-tabset group="language"} + +## R + ```{r} library(stochtree) library(tgp) @@ -41,16 +60,53 @@ library(Matrix) library(mvtnorm) ``` +## Python + +```{python} +import numpy as np +import matplotlib.pyplot as plt +from scipy.sparse import csr_matrix +from sklearn.gaussian_process import GaussianProcessRegressor +from sklearn.gaussian_process.kernels import RBF, WhiteKernel +from sklearn.datasets import make_friedman1 + +from stochtree import BARTModel, compute_forest_leaf_indices +``` + +:::: + +Set a seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed <- 101 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 101 +rng = np.random.default_rng(random_seed) +``` + +:::: + + # Demo 1: Univariate Supervised Learning -We begin with a simulated example from the `tgp` package (@gramacy2010categorical). -This data generating process (DGP) is non-stationary with a single numeric -covariate. We define a training set and test set and evaluate various approaches -to modeling the out of sample outcome data. +We begin with a non-stationary simulated DGP with a single numeric covariate, +originally described in @gramacy2010categorical. We define a training set and test +set and evaluate various approaches to modeling the out-of-sample outcome. ## Traditional Gaussian Process -We can use the `tgp` package to model this data with a classical Gaussian Process. 
+::::{.panel-tabset group="language"} + +## R ```{r} #| results: hide @@ -71,14 +127,64 @@ plot(model_gp$ZZ.mean, y_test, xlab = "predicted", ylab = "actual", main = "Gaus abline(0,1,lwd=2.5,lty=3,col="red") ``` +## Python + +```{python} +# Generate the data +X_train_1d = np.linspace(0, 20, 100) +X_test_1d = np.linspace(0, 20, 99) + +y_train_1 = (np.sin(np.pi * X_train_1d / 5) + 0.2 * np.cos(4 * np.pi * X_train_1d / 5)) * (X_train_1d <= 9.6) +y_train_1[X_train_1d > 9.6] = -1 + X_train_1d[X_train_1d > 9.6] / 10 +y_train_1 = y_train_1 + rng.normal(0, 0.1, len(X_train_1d)) + +y_test_1 = (np.sin(np.pi * X_test_1d / 5) + 0.2 * np.cos(4 * np.pi * X_test_1d / 5)) * (X_test_1d <= 9.6) +y_test_1[X_test_1d > 9.6] = -1 + X_test_1d[X_test_1d > 9.6] / 10 + +# sklearn's GaussianProcessRegressor is used here in place of R's tgp::bgp +X_train_2d = X_train_1d.reshape(-1, 1) +X_test_2d = X_test_1d.reshape(-1, 1) +gp_kernel = RBF(length_scale=1.0) + WhiteKernel(noise_level=0.01) +model_gp_1 = GaussianProcessRegressor(kernel=gp_kernel, n_restarts_optimizer=5, + random_state=random_seed) +model_gp_1.fit(X_train_2d, y_train_1) +gp_pred_1 = model_gp_1.predict(X_test_2d) + +plt.scatter(gp_pred_1, y_test_1, alpha=0.5) +lo, hi = min(gp_pred_1.min(), y_test_1.min()), max(gp_pred_1.max(), y_test_1.max()) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Gaussian process") +plt.show() +``` + +:::: + Assess the RMSE +::::{.panel-tabset group="language"} + +## R + ```{r} sqrt(mean((model_gp$ZZ.mean - y_test)^2)) ``` +## Python + +```{python} +print(f"RMSE: {np.sqrt(np.mean((gp_pred_1 - y_test_1)**2)):.4f}") +``` + +:::: + ## BART-based Gaussian Process +::::{.panel-tabset group="language"} + +## R + ```{r} # Run BART on the data num_trees <- 200 @@ -86,8 +192,10 @@ sigma_leaf <- 1/num_trees X_train <- as.data.frame(X_train) X_test <- as.data.frame(X_test) colnames(X_train) <- colnames(X_test) <- "x1" 
+general_params <- list(num_threads=1) mean_forest_params <- list(num_trees=num_trees, sigma2_leaf_init=sigma_leaf) -bart_model <- bart(X_train=X_train, y_train=y_train, X_test=X_test, mean_forest_params = mean_forest_params) +bart_model <- bart(X_train=X_train, y_train=y_train, X_test=X_test, + general_params = general_params, mean_forest_params = mean_forest_params) # Extract kernels needed for kriging leaf_mat_train <- computeForestLeafIndices(bart_model, X_train, forest_type = "mean", @@ -115,19 +223,99 @@ plot(yhat_mean_test, y_test, xlab = "predicted", ylab = "actual", main = "BART G abline(0,1,lwd=2.5,lty=3,col="red") ``` +## Python + +```{python} +# Run BART on the data +num_trees = 200 +sigma_leaf = 1 / num_trees +general_params = {"num_threads": 1} +mean_forest_params = {"num_trees": num_trees, "sigma2_leaf_init": sigma_leaf} +bart_model_1 = BARTModel() +bart_model_1.sample(X_train=X_train_2d, y_train=y_train_1, X_test=X_test_2d, + general_params=general_params, mean_forest_params=mean_forest_params) + +# Extract leaf indices for the last retained forest sample +last_sample = bart_model_1.num_samples - 1 +leaf_mat_train_1 = compute_forest_leaf_indices(bart_model_1, X_train_2d, + forest_type="mean", forest_inds=last_sample) +leaf_mat_test_1 = compute_forest_leaf_indices(bart_model_1, X_test_2d, + forest_type="mean", forest_inds=last_sample) + +# Build sparse W matrices (rows = observations, cols = global leaf indices) +n_train_1, n_test_1 = len(y_train_1), len(y_test_1) +col_inds_train = leaf_mat_train_1.flatten() +col_inds_test = leaf_mat_test_1.flatten() +max_col = max(col_inds_train.max(), col_inds_test.max()) + 1 +W_train_1 = csr_matrix( + (np.ones(len(col_inds_train)), (np.tile(np.arange(n_train_1), num_trees), col_inds_train)), + shape=(n_train_1, max_col), +) +W_test_1 = csr_matrix( + (np.ones(len(col_inds_test)), (np.tile(np.arange(n_test_1), num_trees), col_inds_test)), + shape=(n_test_1, max_col), +) + +# Compute kernel matrices +W_tr = 
W_train_1.toarray() +W_te = W_test_1.toarray() +Sigma_22 = (W_tr @ W_tr.T) / num_trees +Sigma_11 = (W_te @ W_te.T) / num_trees +Sigma_12 = (W_te @ W_tr.T) / num_trees +Sigma_21 = Sigma_12.T +Sigma_22_inv = np.linalg.pinv(Sigma_22) + +# Compute GP posterior mean and covariance +mu_tilde = Sigma_12 @ Sigma_22_inv @ y_train_1 +Sigma_tilde = sigma_leaf * (Sigma_11 - Sigma_12 @ Sigma_22_inv @ Sigma_21) +Sigma_tilde += 1e-8 * np.eye(n_test_1) # small jitter for numerical stability + +# Sample from f(X_test) | X_test, X_train, f(X_train) +gp_samples_1 = rng.multivariate_normal(mu_tilde, Sigma_tilde, size=1000, method="eigh") + +# Posterior mean predictions +yhat_mean_test_1 = gp_samples_1.mean(axis=0) +lo = min(yhat_mean_test_1.min(), y_test_1.min()) +hi = max(yhat_mean_test_1.max(), y_test_1.max()) +plt.scatter(yhat_mean_test_1, y_test_1, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("BART Gaussian process") +plt.show() +``` + +:::: + Assess the RMSE +::::{.panel-tabset group="language"} + +## R + ```{r} sqrt(mean((yhat_mean_test - y_test)^2)) ``` +## Python + +```{python} +print(f"RMSE: {np.sqrt(np.mean((yhat_mean_test_1 - y_test_1)**2)):.4f}") +``` + +:::: + # Demo 2: Multivariate Supervised Learning -We proceed to the simulated "Friedman" dataset, as implemented in `tgp`. +We proceed to the simulated "Friedman" dataset (@friedman1991multivariate). In R, this +is accessed via `tgp::friedman.1.data`; in Python we use +`sklearn.datasets.make_friedman1`, which implements the same DGP. ## Traditional Gaussian Process -We can use the `tgp` package to model this data with a classical Gaussian Process. 
+::::{.panel-tabset group="language"} + +## R ```{r} #| results: hide @@ -150,22 +338,74 @@ plot(model_gp$ZZ.mean, y_test, xlab = "predicted", ylab = "actual", main = "Gaus abline(0,1,lwd=2.5,lty=3,col="red") ``` +## Python + +```{python} +# Generate the data: 10 Friedman features + 10 noise features +n = 100 +X_raw, y_friedman = make_friedman1(n_samples=n, n_features=10, noise=1.0, + random_state=random_seed) +X_2 = np.hstack([X_raw, rng.uniform(size=(n, 10))]) # 20 features total +y_2 = y_friedman + +train_inds_2 = rng.choice(n, int(0.8 * n), replace=False) +test_inds_2 = np.setdiff1d(np.arange(n), train_inds_2) +X_train_2, X_test_2 = X_2[train_inds_2], X_2[test_inds_2] +y_train_2, y_test_2 = y_2[train_inds_2], y_2[test_inds_2] + +# sklearn's GaussianProcessRegressor is used here in place of R's tgp::bgp +gp_kernel_2 = RBF(length_scale=1.0) + WhiteKernel(noise_level=1.0) +model_gp_2 = GaussianProcessRegressor(kernel=gp_kernel_2, n_restarts_optimizer=1, + random_state=random_seed) +model_gp_2.fit(X_train_2, y_train_2) +gp_pred_2 = model_gp_2.predict(X_test_2) + +lo = min(gp_pred_2.min(), y_test_2.min()) +hi = max(gp_pred_2.max(), y_test_2.max()) +plt.scatter(gp_pred_2, y_test_2, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Gaussian process") +plt.show() +``` + +:::: + Assess the RMSE +::::{.panel-tabset group="language"} + +## R + ```{r} sqrt(mean((model_gp$ZZ.mean - y_test)^2)) ``` +## Python + +```{python} +print(f"RMSE: {np.sqrt(np.mean((gp_pred_2 - y_test_2)**2)):.4f}") +``` + +:::: + ## BART-based Gaussian Process +::::{.panel-tabset group="language"} + +## R + ```{r} # Run BART on the data num_trees <- 200 sigma_leaf <- 1/num_trees X_train <- as.data.frame(X_train) X_test <- as.data.frame(X_test) +general_params <- list(num_threads=1) mean_forest_params <- list(num_trees=num_trees, sigma2_leaf_init=sigma_leaf) -bart_model <- bart(X_train=X_train, 
y_train=y_train, X_test=X_test, mean_forest_params = mean_forest_params) +bart_model <- bart(X_train=X_train, y_train=y_train, X_test=X_test, + general_params = general_params, mean_forest_params = mean_forest_params) # Extract kernels needed for kriging leaf_mat_train <- computeForestLeafIndices(bart_model, X_train, forest_type = "mean", @@ -193,12 +433,83 @@ plot(yhat_mean_test, y_test, xlab = "predicted", ylab = "actual", main = "BART G abline(0,1,lwd=2.5,lty=3,col="red") ``` +## Python + +```{python} +num_trees = 200 +sigma_leaf = 1 / num_trees +general_params = {"num_threads": 1} +mean_forest_params = {"num_trees": num_trees, "sigma2_leaf_init": sigma_leaf} +bart_model_2 = BARTModel() +bart_model_2.sample(X_train=X_train_2, y_train=y_train_2, X_test=X_test_2, + general_params=general_params, mean_forest_params=mean_forest_params) + +last_sample_2 = bart_model_2.num_samples - 1 +leaf_mat_train_2 = compute_forest_leaf_indices(bart_model_2, X_train_2, + forest_type="mean", forest_inds=last_sample_2) +leaf_mat_test_2 = compute_forest_leaf_indices(bart_model_2, X_test_2, + forest_type="mean", forest_inds=last_sample_2) + +n_train_2, n_test_2 = len(y_train_2), len(y_test_2) +col_inds_train_2 = leaf_mat_train_2.flatten() +col_inds_test_2 = leaf_mat_test_2.flatten() +max_col_2 = max(col_inds_train_2.max(), col_inds_test_2.max()) + 1 +W_train_2 = csr_matrix( + (np.ones(len(col_inds_train_2)), + (np.tile(np.arange(n_train_2), num_trees), col_inds_train_2)), + shape=(n_train_2, max_col_2), +) +W_test_2 = csr_matrix( + (np.ones(len(col_inds_test_2)), + (np.tile(np.arange(n_test_2), num_trees), col_inds_test_2)), + shape=(n_test_2, max_col_2), +) + +W_tr2 = W_train_2.toarray() +W_te2 = W_test_2.toarray() +Sigma_22_2 = (W_tr2 @ W_tr2.T) / num_trees +Sigma_11_2 = (W_te2 @ W_te2.T) / num_trees +Sigma_12_2 = (W_te2 @ W_tr2.T) / num_trees +Sigma_21_2 = Sigma_12_2.T +Sigma_22_inv_2 = np.linalg.pinv(Sigma_22_2) + +mu_tilde_2 = Sigma_12_2 @ Sigma_22_inv_2 @ y_train_2 
+Sigma_tilde_2 = sigma_leaf * (Sigma_11_2 - Sigma_12_2 @ Sigma_22_inv_2 @ Sigma_21_2) +Sigma_tilde_2 += 1e-8 * np.eye(n_test_2) + +gp_samples_2 = rng.multivariate_normal(mu_tilde_2, Sigma_tilde_2, size=1000, method="eigh") +yhat_mean_test_2 = gp_samples_2.mean(axis=0) + +lo = min(yhat_mean_test_2.min(), y_test_2.min()) +hi = max(yhat_mean_test_2.max(), y_test_2.max()) +plt.scatter(yhat_mean_test_2, y_test_2, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("BART Gaussian process") +plt.show() +``` + +:::: + Assess the RMSE +::::{.panel-tabset group="language"} + +## R + ```{r} sqrt(mean((yhat_mean_test - y_test)^2)) ``` +## Python + +```{python} +print(f"RMSE: {np.sqrt(np.mean((yhat_mean_test_2 - y_test_2)**2)):.4f}") +``` + +:::: + While the use case of a BART kernel for classical kriging is perhaps unclear without more empirical investigation, the kernel approach can be very beneficial for causal inference applications. diff --git a/vignettes/heteroskedastic.qmd b/vignettes/heteroskedastic.qmd index fd9105741..6c1beacca 100644 --- a/vignettes/heteroskedastic.qmd +++ b/vignettes/heteroskedastic.qmd @@ -1,8 +1,21 @@ --- title: "Heteroskedastic BART" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + This vignette demonstrates how to use the `bart()` function for Bayesian supervised learning (@chipman2010bart) and causal inference (@hahn2020bayesian), with an additional "variance forest," for modeling conditional variance (see @murray2021log). 
@@ -20,7 +33,11 @@ library(stochtree) ## Python ```{python} -# Python implementation coming soon +import numpy as np +import matplotlib.pyplot as plt +from stochtree import BARTModel + +rng = np.random.default_rng(101) ``` :::: @@ -83,7 +100,24 @@ s_x_train <- s_XW[train_inds] ## Python ```{python} -# Python implementation coming soon +n, p_x = 500, 10 +X = rng.uniform(size=(n, p_x)) +# Note: R's X[,1] = Python's X[:,0] +s_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * 0.5 + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * 1.0 + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * 2.0 + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * 3.0 +) +y = rng.normal(size=n) * s_XW + +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test, X_train = X[test_inds], X[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +s_x_test, s_x_train = s_XW[test_inds], s_XW[train_inds] ``` :::: @@ -119,7 +153,15 @@ bart_model_warmstart <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +num_trees = 20 +bart_model_warmstart = BARTModel() +bart_model_warmstart.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=10, num_burnin=0, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0}, + variance_forest_params={"num_trees": num_trees}, +) ``` :::: @@ -139,7 +181,12 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -171,7 +218,14 @@ 
bart_model_mcmc <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model_mcmc = BARTModel() +bart_model_mcmc.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=0, num_burnin=1000, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0}, + variance_forest_params={"num_trees": num_trees}, +) ``` :::: @@ -191,7 +245,12 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -252,7 +311,24 @@ s_x_train <- s_XW[train_inds] ## Python ```{python} -# Python implementation coming soon +n, p_x = 500, 10 +X = rng.uniform(size=(n, p_x)) +# R's X[,3] = Python's X[:,2] +s_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * (0.5 * X[:, 2]) + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (1.0 * X[:, 2]) + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * (2.0 * X[:, 2]) + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * (3.0 * X[:, 2]) +) +y = rng.normal(size=n) * s_XW + +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test, X_train = X[test_inds], X[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +s_x_test, s_x_train = s_XW[test_inds], s_XW[train_inds] ``` :::: @@ -286,7 +362,17 @@ bart_model_warmstart <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +num_trees = 20 +bart_model_warmstart = BARTModel() +bart_model_warmstart.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=10, 
num_burnin=0, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0, + "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": num_trees, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 1}, +) ``` :::: @@ -304,7 +390,12 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -335,7 +426,16 @@ bart_model_mcmc <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model_mcmc = BARTModel() +bart_model_mcmc.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=0, num_burnin=1000, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0, + "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": num_trees, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 1}, +) ``` :::: @@ -353,7 +453,12 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -425,7 +530,31 @@ s_x_test <- s_XW[test_inds] ## Python ```{python} -# Python implementation coming 
soon +n, p_x = 500, 10 +X = rng.uniform(size=(n, p_x)) +# R's X[,2] = Python's X[:,1]; R's X[,1] = Python's X[:,0] +f_XW = ( + ((X[:, 1] >= 0) & (X[:, 1] < 0.25)) * (-6) + + ((X[:, 1] >= 0.25) & (X[:, 1] < 0.5)) * (-2) + + ((X[:, 1] >= 0.5) & (X[:, 1] < 0.75)) * (2) + + ((X[:, 1] >= 0.75) & (X[:, 1] < 1.0)) * (6) +) +s_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * 0.5 + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * 1.0 + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * 2.0 + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * 3.0 +) +y = f_XW + rng.normal(size=n) * s_XW + +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test, X_train = X[test_inds], X[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +f_x_test = f_XW[test_inds] +s_x_test = s_XW[test_inds] ``` :::: @@ -458,7 +587,16 @@ bart_model_warmstart <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model_warmstart = BARTModel() +bart_model_warmstart.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=10, num_burnin=0, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, + "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": 50, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 5}, +) ``` :::: @@ -479,7 +617,19 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +mean_pred = bart_model_warmstart.y_hat_test.mean(axis=1) +lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) +plt.scatter(mean_pred, y_test, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") +plt.show() + +var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) +lo, hi = 
min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -510,7 +660,16 @@ bart_model_mcmc <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model_mcmc = BARTModel() +bart_model_mcmc.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=0, num_burnin=1000, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, + "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": 50, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 5}, +) ``` :::: @@ -531,7 +690,19 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +mean_pred = bart_model_mcmc.y_hat_test.mean(axis=1) +lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) +plt.scatter(mean_pred, y_test, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") +plt.show() + +var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -603,7 +774,31 @@ s_x_test <- s_XW[test_inds] ## Python ```{python} -# Python implementation coming soon +n, p_x = 500, 10 +X = rng.uniform(size=(n, p_x)) +# R's X[,2]=X[:,1], X[,4]=X[:,3], X[,1]=X[:,0], X[,3]=X[:,2] +f_XW = ( + ((X[:, 1] >= 0) & (X[:, 1] < 0.25)) * (-6 * X[:, 
3]) + + ((X[:, 1] >= 0.25) & (X[:, 1] < 0.5)) * (-2 * X[:, 3]) + + ((X[:, 1] >= 0.5) & (X[:, 1] < 0.75)) * (2 * X[:, 3]) + + ((X[:, 1] >= 0.75) & (X[:, 1] < 1.0)) * (6 * X[:, 3]) +) +s_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * (0.5 * X[:, 2]) + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (1.0 * X[:, 2]) + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * (2.0 * X[:, 2]) + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * (3.0 * X[:, 2]) +) +y = f_XW + rng.normal(size=n) * s_XW + +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test, X_train = X[test_inds], X[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +f_x_test = f_XW[test_inds] +s_x_test = s_XW[test_inds] ``` :::: @@ -636,7 +831,16 @@ bart_model_warmstart <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model_warmstart = BARTModel() +bart_model_warmstart.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=10, num_burnin=0, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, + "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": 50, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 5}, +) ``` :::: @@ -657,7 +861,19 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +mean_pred = bart_model_warmstart.y_hat_test.mean(axis=1) +lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) +plt.scatter(mean_pred, y_test, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") +plt.show() + +var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) 
+plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: @@ -688,7 +904,16 @@ bart_model_mcmc <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model_mcmc = BARTModel() +bart_model_mcmc.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=0, num_burnin=1000, num_mcmc=100, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, + "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": 50, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 5}, +) ``` :::: @@ -709,7 +934,19 @@ abline(0,1,col="red",lty=2,lwd=2.5) ## Python ```{python} -# Python implementation coming soon +mean_pred = bart_model_mcmc.y_hat_test.mean(axis=1) +lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) +plt.scatter(mean_pred, y_test, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") +plt.show() + +var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) +lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) +plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.show() ``` :::: diff --git a/vignettes/iv.qmd b/vignettes/iv.qmd index 3342d432e..b41fb12c3 100644 --- a/vignettes/iv.qmd +++ b/vignettes/iv.qmd @@ -6,10 +6,23 @@ author: - name: Drew Herren affiliation: University of Texas at Austin date: today -bibliography: R/IV/iv.bib +bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- -## 
Introduction +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + +# Introduction Here we consider a causal inference problem with a binary treatment and a binary outcome where there is unobserved confounding, but an exogenous instrument is available (also @@ -17,7 +30,7 @@ binary). This problem requires several extensions to the basic BART model, all o can be implemented as Gibbs samplers using `stochtree`. Our analysis follows the Bayesian nonparametric approach described in the supplement to @hahn2016bayesian. -## Background +# Background To be concrete, suppose we wish to measure the effect of receiving a flu vaccine on the probability of getting the flu. Individuals who opt to get a flu shot differ in many @@ -35,10 +48,11 @@ Let $V$ denote the treatment variable (vaccine). Let $Y$ denote the response covariate (patient age). Let $S$ denote the *principal strata*, an exhaustive characterization of how individuals -are affected by the encouragement. Some people will get a flu shot no matter what: -*always takers* ($a$). Some will not get the shot no matter what: *never takers* ($n$). -*Compliers* ($c$) would not have gotten the shot but for the encouragement. We assume -no *defiers* ($d$). +are affected by the encouragement. +Some people will get a flu shot no matter what: *always takers* ($a$). +Some will not get the shot no matter what: *never takers* ($n$). +*Compliers* ($c$) would not have gotten the shot but for the encouragement. +We assume no *defiers* ($d$). ## The Causal Diagram @@ -110,20 +124,14 @@ $$ and analogously for $\gamma_c^{11}(x)$. 
-## Load Libraries +# Setup + +We load all necessary libraries :::{.panel-tabset group="language"} ## R -```{r} -#| include: false -reticulate::use_python( - Sys.getenv("RETICULATE_PYTHON", unset = Sys.which("python3")), - required = TRUE -) -``` - ```{r} #| message: false library(stochtree) @@ -144,9 +152,29 @@ from stochtree import ( ::: -## Simulate the Data +And set a seed for reproducibility + +:::{.panel-tabset group="language"} + +## R + +```{r} +random_seed <- 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +::: + +## Data Generation -### Setup +Data size :::{.panel-tabset group="language"} @@ -154,23 +182,17 @@ from stochtree import ( ```{r} n <- 20000 -random_seed <- NULL ``` ## Python ```{python} n = 20000 -random_seed = None -if random_seed is not None: - rng = np.random.default_rng(random_seed) -else: - rng = np.random.default_rng() ``` ::: -### Generate the Instrument +Generate the Instrument :::{.panel-tabset group="language"} @@ -188,10 +210,8 @@ z = rng.binomial(n=1, p=0.5, size=n) ::: -### Generate the Covariate - -We think of $X$ as patient age, drawn from a uniform distribution on $[0, 3]$ -(pre-standardized for illustration purposes). +We conceptualize a covariate $X$ as patient age, drawn from a uniform distribution on $[0, 3]$ +(pre-standardized for illustration purposes) and generate the covariate :::{.panel-tabset group="language"} @@ -213,10 +233,8 @@ x = X[:, 0] ::: -### Generate the Principal Strata - -We generate $S$ from a logistic model in $X$, parameterized so that the probability -of being a never taker decreases with age. 
+We generate principal strata $S$ from a logistic model in $X$, parameterized so that the probability +of being a never taker decreases with age :::{.panel-tabset group="language"} @@ -262,10 +280,8 @@ for i in range(n): ::: -### Generate the Treatment - -The treatment $V$ is a deterministic function of $S$ and $Z$ — this is what gives the -principal strata their meaning. +The treatment $V$ is generated as a deterministic function of $S$ and $Z$ — this is what gives the +principal strata their meaning :::{.panel-tabset group="language"} @@ -283,11 +299,10 @@ v = 1*(s == 'a') + 0*(s == 'n') + z*(s == "c") + (1-z)*(s == "d") ::: -### Generate the Outcome - -The outcome structural model — by varying this function we can alter the -identification conditions. Setting it to depend on `zval` violates the exclusion -restriction, and we do so here to illustrate partial identification. +The outcome is generated according to the structural model below. +By varying this function we can alter the identification conditions. +Setting it to depend on `zval` violates the exclusion restriction, +and we do so here to illustrate partial identification. :::{.panel-tabset group="language"} @@ -315,10 +330,9 @@ y = rng.binomial(n=1, p=gamfun(X[:, 0], v, z, s), size=n) ::: -### Organize the Data +## Model Fitting -For the monotone probit model, the observations must be sorted so that $Z=1$ cases -come first. +In order to fit a monotone probit model, the observations must be sorted so that $Z=1$ cases come first. :::{.panel-tabset group="language"} @@ -354,8 +368,6 @@ x = x[sort_index] ::: -## Fit the Outcome Model - We fit a probit BART model for $\Pr(Y=1 \mid V=1, Z=1, X=x)$ using the Albert–Chib [@albert1993bayesian] data augmentation Gibbs sampler. We initialize the forest, enter the main loop (alternating: sample forest | sample latent utilities), @@ -366,7 +378,6 @@ and retain all post-warmstart draws. 
## R ```{r} -#| cache: true num_warmstart <- 10 num_mcmc <- 1000 num_samples <- num_warmstart + num_mcmc @@ -415,7 +426,7 @@ for (i in seq_len(num_samples)) { forest_model$sample_one_iteration( forest_dataset, outcome, forest_samples, active_forest, rng_r, forest_model_config, global_model_config, - keep_forest = TRUE, gfr = gfr_flag + keep_forest = TRUE, gfr = gfr_flag, num_threads = 1 ) eta <- forest_samples$predict_raw_single_forest(forest_dataset, i - 1) U1 <- runif(n1, pnorm(0, eta[y == 1], 1), 1) @@ -430,9 +441,8 @@ for (i in seq_len(num_samples)) { ## Python ```{python} -#| cache: true -num_warmstart <- 10 -num_mcmc <- 1000 +num_warmstart = 10 +num_mcmc = 1000 num_samples = num_warmstart + num_mcmc alpha = 0.95; beta = 2; min_samples_leaf = 1; max_depth = 20 @@ -476,7 +486,7 @@ for i in range(num_samples): forest_sampler.sample_one_iteration( forest_samples, active_forest, forest_dataset, outcome, cpp_rng, global_model_config, forest_model_config, - keep_forest=True, gfr=gfr_flag, + keep_forest=True, gfr=gfr_flag, num_threads=1, ) eta = np.squeeze(forest_samples.predict_raw_single_forest(forest_dataset, i)) mu0 = eta[y == 0]; mu1 = eta[y == 1] @@ -489,11 +499,7 @@ for i in range(num_samples): ::: -## Fit the Monotone Probit Model - -The monotonicity constraint -$\Pr(V=1 \mid Z=0, X=x) \leq \Pr(V=1 \mid Z=1, X=x)$ is enforced via the -data augmentation of @papakostas2023forecasts. We parameterize: +The monotonicity constraint $\Pr(V=1 \mid Z=0, X=x) \leq \Pr(V=1 \mid Z=1, X=x)$ is enforced via the data augmentation of @papakostas2023forecasts. We parameterize $$ \Pr(V=1 \mid Z=0, X=x) = \Phi_f(x)\,\Phi_h(x), \qquad @@ -507,7 +513,6 @@ where $\Phi_\mu(x)$ is the normal CDF with mean $\mu(x)$ and variance 1. 
## R ```{r} -#| cache: true X_h <- as.matrix(X[z == 0, ]) n0 <- sum(z == 0); n1 <- sum(z == 1) num_trees_f <- 50; num_trees_h <- 20 @@ -563,12 +568,12 @@ gfr_flag <- TRUE for (i in seq_len(num_samples)) { if (i > num_warmstart) gfr_flag <- FALSE fm_f$sample_one_iteration(forest_dataset_f, out_f, fs_f, af_f, - rng_r, fmc_f, gmc_mono, keep_forest = TRUE, gfr = gfr_flag) + rng_r, fmc_f, gmc_mono, keep_forest = TRUE, gfr = gfr_flag, num_threads = 1) fm_h$sample_one_iteration(forest_dataset_h, out_h, fs_h, af_h, - rng_r, fmc_h, gmc_mono, keep_forest = TRUE, gfr = gfr_flag) + rng_r, fmc_h, gmc_mono, keep_forest = TRUE, gfr = gfr_flag, num_threads = 1) - eta_f <- forest_samples_f$predict_raw_single_forest(forest_dataset_f, i - 1) - eta_h <- forest_samples_h$predict_raw_single_forest(forest_dataset_h, i - 1) + eta_f <- fs_f$predict_raw_single_forest(forest_dataset_f, i - 1) + eta_h <- fs_h$predict_raw_single_forest(forest_dataset_h, i - 1) idx0 <- which(v0 == 0) w1 <- (1 - pnorm(eta_h[idx0])) * (1 - pnorm(eta_f[n1 + idx0])) @@ -598,7 +603,6 @@ for (i in seq_len(num_samples)) { ## Python ```{python} -#| cache: true X_h = X[z == 0, :] n0 = int(np.sum(z == 0)); n1 = int(np.sum(z == 1)) num_trees_f = 50; num_trees_h = 20 @@ -651,9 +655,9 @@ for i in range(num_samples): if i >= num_warmstart: gfr_flag = False fs_f.sample_one_iteration(forest_samples_f, af_f, forest_dataset_f, out_f, - cpp_rng, gmc_mono, fmc_f, keep_forest=True, gfr=gfr_flag) + cpp_rng, gmc_mono, fmc_f, keep_forest=True, gfr=gfr_flag, num_threads=1) fs_h.sample_one_iteration(forest_samples_h, af_h, forest_dataset_h, out_h, - cpp_rng, gmc_mono, fmc_h, keep_forest=True, gfr=gfr_flag) + cpp_rng, gmc_mono, fmc_h, keep_forest=True, gfr=gfr_flag, num_threads=1) eta_f = np.squeeze(forest_samples_f.predict_raw_single_forest(forest_dataset_f, i)) eta_h = np.squeeze(forest_samples_h.predict_raw_single_forest(forest_dataset_h, i)) diff --git a/vignettes/multi-chain.qmd b/vignettes/multi-chain.qmd index 348ab630c..92b08d40e 
100644 --- a/vignettes/multi-chain.qmd +++ b/vignettes/multi-chain.qmd @@ -1,8 +1,21 @@ --- title: "Multi-Chain Inference" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + # Motivation Mixing of an MCMC sampler is a perennial concern for complex Bayesian models. BART @@ -40,7 +53,12 @@ library(doParallel) ## Python ```{python} -# Python implementation coming soon +import numpy as np +import matplotlib.pyplot as plt +import arviz as az +from stochtree import BARTModel + +rng = np.random.default_rng(1111) ``` :::: @@ -89,7 +107,23 @@ y_train <- y[train_inds] ## Python ```{python} -# Python implementation coming soon +n, p_x, p_w, snr = 500, 10, 1, 3 +X = rng.uniform(size=(n, p_x)) +leaf_basis = rng.uniform(size=(n, p_w)) +f_XW = (((0 <= X[:, 0]) & (0.25 > X[:, 0])) * (-7.5 * leaf_basis[:, 0]) + + ((0.25 <= X[:, 0]) & (0.5 > X[:, 0])) * (-2.5 * leaf_basis[:, 0]) + + ((0.5 <= X[:, 0]) & (0.75 > X[:, 0])) * (2.5 * leaf_basis[:, 0]) + + ((0.75 <= X[:, 0]) & (1 > X[:, 0])) * (7.5 * leaf_basis[:, 0])) +noise_sd = np.std(f_XW) / snr +y = f_XW + rng.normal(0, noise_sd, size=n) + +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test, X_train = X[test_inds], X[train_inds] +leaf_basis_test, leaf_basis_train = leaf_basis[test_inds], leaf_basis[train_inds] +y_test, y_train = y[test_inds], y[train_inds] ``` :::: @@ -120,7 +154,10 @@ num_mcmc <- 2000 ## Python ```{python} -# Python implementation coming soon +num_chains = 4 +num_gfr = 0 +num_burnin = 1000 +num_mcmc = 2000 ``` :::: @@ -146,14 +183,19 @@ bart_model <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model = BARTModel() +bart_model.sample( + 
X_train=X_train, leaf_basis_train=leaf_basis_train, y_train=y_train, + num_gfr=num_gfr, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1, "num_chains": num_chains}, +) ``` :::: -Now we have a `bartmodel` object with `num_chains * num_mcmc` samples stored -internally. These samples are arranged sequentially, with the first `num_mcmc` -samples corresponding to chain 1, the next `num_mcmc` samples to chain 2, etc. +Now we have a model with `num_chains * num_mcmc` samples stored internally. These +samples are arranged sequentially, with the first `num_mcmc` samples corresponding +to chain 1, the next `num_mcmc` samples to chain 2, etc. Since each chain is a set of samples of the same model, we can analyze the samples collectively, for example, by looking at out-of-sample predictions. @@ -177,7 +219,14 @@ abline(0, 1, col = "red", lty = 3, lwd = 3) ## Python ```{python} -# Python implementation coming soon +y_hat_test = bart_model.predict( + X=X_test, leaf_basis=leaf_basis_test, type="mean", terms="y_hat" +) +lo, hi = min(y_hat_test.min(), y_test.min()), max(y_hat_test.max(), y_test.max()) +plt.scatter(y_hat_test, y_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual") +plt.show() ``` :::: @@ -218,7 +267,18 @@ cat(paste0( ## Python ```{python} -# Python implementation coming soon +# Reshape flat sigma2 samples into (num_chains, num_mcmc) for per-chain diagnostics +# az.from_dict's posterior argument maps variable names to (chain, draw) arrays +idata = az.from_dict(posterior={"sigma2": bart_model.global_var_samples.reshape(num_chains, num_mcmc)}) + +az.plot_trace(idata) +plt.axhline(noise_sd**2, color="black", linestyle="dashed", linewidth=1.5) +plt.show() + +print("ESS: ", az.ess(idata)) +print("R-hat:", az.rhat(idata)) +az.plot_autocorr(idata) +plt.show() ``` :::: @@ -242,7 +302,8 @@ dimnames(coda_array) <- list( ## Python ```{python} -# Python 
implementation coming soon +# Reshape the flat sigma2 samples into (num_chains, num_mcmc) for per-chain plots +sigma2_chains = bart_model.global_var_samples.reshape(num_chains, num_mcmc) ``` :::: @@ -279,7 +340,15 @@ bayesplot::mcmc_hist_by_chain( ## Python ```{python} -# Python implementation coming soon +fig, axes = plt.subplots(1, num_chains, figsize=(12, 3), sharey=True) +for i, ax in enumerate(axes): + ax.hist(sigma2_chains[i], bins=30) + ax.axvline(noise_sd**2, color="black", linestyle="dashed", linewidth=1.5) + ax.set_title(f"Chain {i+1}") + ax.set_xlabel(r"$\sigma^2$") +fig.suptitle("Global error scale posterior by chain") +plt.tight_layout() +plt.show() ``` :::: @@ -304,7 +373,10 @@ num_mcmc <- 2000 ## Python ```{python} -# Python implementation coming soon +num_chains = 4 +num_gfr = 5 +num_burnin = 1000 +num_mcmc = 2000 ``` :::: @@ -330,7 +402,13 @@ xbart_model_string <- stochtree::saveBARTModelToJsonString(xbart_model) ## Python ```{python} -# Python implementation coming soon +xbart_model = BARTModel() +xbart_model.sample( + X_train=X_train, leaf_basis_train=leaf_basis_train, y_train=y_train, + num_gfr=num_gfr, num_burnin=0, num_mcmc=0, + general_params={"num_threads": 1}, +) +xbart_model_json = xbart_model.to_json() ``` :::: @@ -359,7 +437,14 @@ bart_model <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, leaf_basis_train=leaf_basis_train, y_train=y_train, + num_gfr=0, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1, "num_chains": num_chains}, + previous_model_json=xbart_model_json, + previous_model_warmstart_sample_num=num_gfr - 1, # 0-indexed +) ``` :::: @@ -383,7 +468,14 @@ abline(0, 1, col = "red", lty = 3, lwd = 3) ## Python ```{python} -# Python implementation coming soon +y_hat_test = bart_model.predict( + X=X_test, leaf_basis=leaf_basis_test, type="mean", terms="y_hat" +) +lo, hi = min(y_hat_test.min(), y_test.min()), 
max(y_hat_test.max(), y_test.max()) +plt.scatter(y_hat_test, y_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted"); plt.ylabel("Actual") +plt.show() ``` :::: @@ -420,7 +512,16 @@ cat(paste0( ## Python ```{python} -# Python implementation coming soon +idata = az.from_dict(posterior={"sigma2": bart_model.global_var_samples.reshape(num_chains, num_mcmc)}) + +az.plot_trace(idata) +plt.axhline(noise_sd**2, color="black", linestyle="dashed", linewidth=1.5) +plt.show() + +print("ESS: ", az.ess(idata)) +print("R-hat:", az.rhat(idata)) +az.plot_autocorr(idata) +plt.show() ``` :::: @@ -442,7 +543,7 @@ dimnames(coda_array) <- list( ## Python ```{python} -# Python implementation coming soon +sigma2_chains = bart_model.global_var_samples.reshape(num_chains, num_mcmc) ``` :::: @@ -476,25 +577,31 @@ bayesplot::mcmc_hist_by_chain( ## Python ```{python} -# Python implementation coming soon +fig, axes = plt.subplots(1, num_chains, figsize=(12, 3), sharey=True) +for i, ax in enumerate(axes): + ax.hist(sigma2_chains[i], bins=30) + ax.axvline(noise_sd**2, color="black", linestyle="dashed", linewidth=1.5) + ax.set_title(f"Chain {i+1}") + ax.set_xlabel(r"$\sigma^2$") +fig.suptitle("Global error scale posterior by chain") +plt.tight_layout() +plt.show() ``` :::: ## Sampling Multiple Chains in Parallel -While the above examples used sequential multi-chain sampling internally in `bart()` -and `bcf()`, it is also possible to run chains in parallel via `doParallel`. While -`bartmodel` or `bcfmodel` objects contain external pointers to C++ data structures -which are not reachable by other processes, we can serialize `stochtree` models to -JSON for cross-process communication. 
After `num_chains` models have been run in -parallel and their JSON representations have been collated in the primary R session, -we can combine these into a single `bartmodel` or `bcfmodel` object via -`createBARTModelFromCombinedJsonString()` or `createBCFModelFromCombinedJsonString()`. +While the above examples used sequential multi-chain sampling internally, it is also +possible to run chains in parallel. In R, this is done via `doParallel` / `foreach`; +in Python, via `concurrent.futures.ProcessPoolExecutor`. In both cases, each chain +is serialized to JSON for cross-process communication, then combined into a single +model via `createBARTModelFromCombinedJsonString()` (R) or +`BARTModel.from_json_string_list()` (Python). -In order to run multiple parallel stochtree chains, a parallel backend must be -registered in your R environment. Note that we do not evaluate the cluster setup -code below in order to interact nicely with CRAN / GitHub Actions environments. +In order to run multiple parallel stochtree chains in R, a parallel backend must be +registered. Note that we do not evaluate the cluster setup code below in order to +interact nicely with GitHub Actions. 
::::{.panel-tabset group="language"} @@ -510,7 +617,22 @@ registerDoParallel(cl) ## Python ```{python} -# Python implementation coming soon +#| eval: false +# Worker function must be defined at module level for pickling +from concurrent.futures import ProcessPoolExecutor + +def _run_bart_chain(args): + X_tr, lb_tr, y_tr, X_te, lb_te, num_burnin, num_mcmc, seed = args + from stochtree import BARTModel + m = BARTModel() + m.sample( + X_train=X_tr, leaf_basis_train=lb_tr, y_train=y_tr, + X_test=X_te, leaf_basis_test=lb_te, + num_gfr=0, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1, "random_seed": seed}, + mean_forest_params={"sample_sigma2_leaf": False}, + ) + return m.to_json(), m.y_hat_test ``` :::: @@ -529,7 +651,10 @@ num_mcmc <- 100 ## Python ```{python} -# Python implementation coming soon +num_chains = 4 +num_gfr = 0 +num_burnin = 100 +num_mcmc = 100 ``` :::: @@ -565,7 +690,18 @@ bart_model_outputs <- foreach(i = 1:num_chains) %dopar% ## Python ```{python} -# Python implementation coming soon +# Sequential loop — replace the loop body with ProcessPoolExecutor for true parallelism +bart_model_outputs = [] +for i in range(num_chains): + m = BARTModel() + m.sample( + X_train=X_train, leaf_basis_train=leaf_basis_train, y_train=y_train, + X_test=X_test, leaf_basis_test=leaf_basis_test, + num_gfr=0, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1, "sample_sigma2_global": True, "random_seed": i + 1}, + mean_forest_params={"sample_sigma2_leaf": False}, + ) + bart_model_outputs.append({"model": m.to_json(), "yhat": m.y_hat_test}) ``` :::: @@ -584,7 +720,7 @@ stopCluster(cl) ## Python ```{python} -# Python implementation coming soon +# No explicit teardown required when using concurrent.futures context manager ``` :::: @@ -608,7 +744,12 @@ combined_bart <- createBARTModelFromCombinedJsonString(bart_model_strings) ## Python ```{python} -# Python implementation coming soon +bart_model_strings = [out["model"] for 
out in bart_model_outputs] +bart_model_yhats = np.column_stack([ + out["yhat"].mean(axis=1) for out in bart_model_outputs +]) # shape: (n_test, num_chains) +combined_bart = BARTModel() +combined_bart.from_json_string_list(bart_model_strings) ``` :::: @@ -624,7 +765,8 @@ yhat_combined <- predict(combined_bart, X_test, leaf_basis_test)$y_hat ## Python ```{python} -# Python implementation coming soon +# type="posterior" (default) returns the full n_test × (num_chains * num_mcmc) matrix +yhat_combined = combined_bart.predict(X=X_test, leaf_basis=leaf_basis_test, terms="y_hat") ``` :::: @@ -657,7 +799,18 @@ par(mfrow = c(1, 1)) ## Python ```{python} -# Python implementation coming soon +fig, axes = plt.subplots(2, 2, figsize=(8, 8)) +for i, ax in enumerate(axes.flat): + chain_combined = yhat_combined[:, i * num_mcmc:(i + 1) * num_mcmc].mean(axis=1) + chain_orig = bart_model_yhats[:, i] + lo = min(chain_combined.min(), chain_orig.min()) + hi = max(chain_combined.max(), chain_orig.max()) + ax.scatter(chain_combined, chain_orig, alpha=0.4, s=10) + ax.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=1.5) + ax.set_xlabel("Deserialized"); ax.set_ylabel("Original") + ax.set_title(f"Chain {i+1} Predictions") +plt.tight_layout() +plt.show() ``` :::: @@ -687,7 +840,17 @@ par(mfrow = c(1, 1)) ## Python ```{python} -# Python implementation coming soon +fig, axes = plt.subplots(2, 2, figsize=(8, 8)) +for i, ax in enumerate(axes.flat): + chain_pred = yhat_combined[:, i * num_mcmc:(i + 1) * num_mcmc].mean(axis=1) + lo = min(chain_pred.min(), y_test.min()) + hi = max(chain_pred.max(), y_test.max()) + ax.scatter(chain_pred, y_test, alpha=0.4, s=10) + ax.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=1.5) + ax.set_xlabel("Predicted"); ax.set_ylabel("Actual") + ax.set_title(f"Chain {i+1} Predictions") +plt.tight_layout() +plt.show() ``` :::: diff --git a/vignettes/multivariate-bcf.qmd b/vignettes/multivariate-bcf.qmd index 51cb9df8a..60ac05fb3 
100644 --- a/vignettes/multivariate-bcf.qmd +++ b/vignettes/multivariate-bcf.qmd @@ -1,16 +1,23 @@ --- title: "Multivariate Treatment BCF" +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + BCF extended to vector-valued (multivariate) treatments, estimating heterogeneous effects for multiple treatment arms simultaneously. -::: {.callout-note} -This vignette is under construction. Content will be ported from: - -- **Python**: `stochtree_repo/demo/notebooks/multivariate_treatment_causal_inference.ipynb` -::: - ## Background When treatments are multivariate — such as continuous dose vectors or multiple @@ -25,29 +32,361 @@ covariate-varying treatment effects. ## Setup +::::{.panel-tabset group="language"} + +## R + +```{r} +library(stochtree) +``` + +## Python + ```{python} -#| eval: false -import stochtree +import matplotlib.pyplot as plt import numpy as np +import pandas as pd +import seaborn as sns +from sklearn.model_selection import train_test_split + +from stochtree import BCFModel ``` +:::: + ## Data Simulation +::::{.panel-tabset group="language"} + +## R + +```{r} +# Generate covariates and propensity scores +n <- 500 +p_X <- 5 +X <- matrix(runif(n * p_X), nrow = n, ncol = p_X) +pi_X <- cbind(0.25 + 0.5 * X[, 1], 0.75 - 0.5 * X[, 2]) +Z <- cbind(rbinom(n, 1, pi_X[, 1]), rbinom(n, 1, pi_X[, 2])) + +# Define outcome mean functions (prognostic and treatment effects) +mu_X <- pi_X[, 1] * 5 + pi_X[, 2] * 2 + 2 * X[, 3] +tau_X <- cbind(X[, 2], X[, 3]) + +# Generate outcome +treatment_term <- rowSums(tau_X * Z) +y <- mu_X + treatment_term + rnorm(n) +``` + +## Python + ```{python} -#| eval: false -# Simulate multivariate treatment data +# RNG +rng = np.random.default_rng() + +# Generate covariates and basis +n = 500 +p_X = 5 +X = rng.uniform(0, 1, (n, p_X)) +pi_X = 
np.c_[0.25 + 0.5 * X[:, 0], 0.75 - 0.5 * X[:, 1]] +Z = rng.binomial(1, pi_X, (n, 2)) + +# Define the outcome mean functions (prognostic and treatment effects) +mu_X = pi_X[:, 0] * 5 + pi_X[:, 1] * 2 + 2 * X[:, 2] +tau_X = np.stack((X[:, 1], X[:, 2]), axis=-1) + +# Generate outcome +epsilon = rng.normal(0, 1, n) +treatment_term = np.multiply(tau_X, Z).sum(axis=1) +y = mu_X + treatment_term + epsilon ``` +:::: + +::::{.panel-tabset group="language"} + +## R + +```{r} +n_test <- round(n * 0.5) +test_inds <- sort(sample(seq_len(n), n_test, replace = FALSE)) +train_inds <- setdiff(seq_len(n), test_inds) +X_train <- X[train_inds, ] +X_test <- X[test_inds, ] +Z_train <- Z[train_inds, ] +Z_test <- Z[test_inds, ] +y_train <- y[train_inds] +y_test <- y[test_inds] +mu_train <- mu_X[train_inds] +mu_test <- mu_X[test_inds] +tau_train <- tau_X[train_inds, ] +tau_test <- tau_X[test_inds, ] +``` + +## Python + +```{python} +sample_inds = np.arange(n) +train_inds, test_inds = train_test_split(sample_inds, test_size=0.5) +X_train = X[train_inds, :] +X_test = X[test_inds, :] +Z_train = Z[train_inds, :] +Z_test = Z[test_inds, :] +y_train = y[train_inds] +y_test = y[test_inds] +pi_train = pi_X[train_inds] +pi_test = pi_X[test_inds] +mu_train = mu_X[train_inds] +mu_test = mu_X[test_inds] +tau_train = tau_X[train_inds, :] +tau_test = tau_X[test_inds, :] +``` + +:::: + ## Model Fitting +::::{.panel-tabset group="language"} + +## R + +```{r} +# Note: propensity adjustment is not supported for multivariate treatment in bcf() +bcf_model <- bcf( + X_train = X_train, + Z_train = Z_train, + y_train = y_train, + X_test = X_test, + Z_test = Z_test, + num_gfr = 10, + num_mcmc = 100 +) +``` + +## Python + ```{python} -#| eval: false -# Fit multivariate BCF +bcf_model = BCFModel() +bcf_model.sample( + X_train=X_train, + Z_train=Z_train, + y_train=y_train, + propensity_train=pi_train, + X_test=X_test, + Z_test=Z_test, + propensity_test=pi_test, + num_gfr=10, + num_mcmc=100, +) ``` +:::: + ## 
Posterior Summaries +### Outcome + +::::{.panel-tabset group="language"} + +## R + +```{r} +# y_hat_test is (n_test x num_samples) +plot( + rowMeans(bcf_model$y_hat_test), y_test, + xlab = "Average estimated outcome", ylab = "True outcome" +) +abline(0, 1, col = "black", lty = 3) +``` + +```{r} +sqrt(mean((rowMeans(bcf_model$y_hat_test) - y_test)^2)) +``` + +## Python + +```{python} +forest_preds_y_mcmc = bcf_model.y_hat_test +y_avg_mcmc = np.squeeze(forest_preds_y_mcmc).mean(axis=1, keepdims=True) +y_df_mcmc = pd.DataFrame( + np.concatenate((np.expand_dims(y_test, 1), y_avg_mcmc), axis=1), + columns=["True outcome", "Average estimated outcome"], +) +sns.scatterplot(data=y_df_mcmc, x="Average estimated outcome", y="True outcome") +plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.show() +``` + +```{python} +np.sqrt(np.mean(np.power(y_avg_mcmc - y_test, 2))) +``` + +:::: + +### Treatment Effects + +::::{.panel-tabset group="language"} + +## R + +```{r} +# tau_hat_test is (n_test x p_Z x num_samples) for multivariate treatment +tau_avg_1 <- rowMeans(bcf_model$tau_hat_test[, 1, ]) +plot( + tau_test[, 1], tau_avg_1, + xlab = "True tau", ylab = "Average estimated tau", + main = "Treatment 1" +) +abline(0, 1, col = "black", lty = 3) +``` + +```{r} +tau_avg_2 <- rowMeans(bcf_model$tau_hat_test[, 2, ]) +plot( + tau_test[, 2], tau_avg_2, + xlab = "True tau", ylab = "Average estimated tau", + main = "Treatment 2" +) +abline(0, 1, col = "black", lty = 3) +``` + +## Python + +```{python} +treatment_idx = 0 +forest_preds_tau_mcmc = np.squeeze(bcf_model.tau_hat_test[:, :, treatment_idx]) +tau_avg_mcmc = np.squeeze(forest_preds_tau_mcmc).mean(axis=1, keepdims=True) +tau_df_mcmc = pd.DataFrame( + np.concatenate( + (np.expand_dims(tau_test[:, treatment_idx], 1), tau_avg_mcmc), axis=1 + ), + columns=["True tau", "Average estimated tau"], +) +sns.scatterplot(data=tau_df_mcmc, x="True tau", y="Average estimated tau") +plt.axline((0, 0), slope=1, color="black", 
linestyle=(0, (3, 3))) +plt.show() +``` + ```{python} -#| eval: false -# Per-treatment CATE posteriors +treatment_idx = 1 +forest_preds_tau_mcmc = np.squeeze(bcf_model.tau_hat_test[:, :, treatment_idx]) +tau_avg_mcmc = np.squeeze(forest_preds_tau_mcmc).mean(axis=1, keepdims=True) +tau_df_mcmc = pd.DataFrame( + np.concatenate( + (np.expand_dims(tau_test[:, treatment_idx], 1), tau_avg_mcmc), axis=1 + ), + columns=["True tau", "Average estimated tau"], +) +sns.scatterplot(data=tau_df_mcmc, x="True tau", y="Average estimated tau") +plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.show() +``` + +:::: + +### Treatment Term + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Compute sum_j(tau_hat(X)_j * Z_j) per observation per sample +treatment_term_mcmc <- apply(bcf_model$tau_hat_test, 3, function(tau_s) { + rowSums(tau_s * Z_test) +}) +true_treatment_term <- rowSums(tau_test * Z_test) +plot( + true_treatment_term, rowMeans(treatment_term_mcmc), + xlab = "True treatment term", ylab = "Average estimated treatment term" +) +abline(0, 1, col = "black", lty = 3) ``` + +## Python + +```{python} +treatment_term_mcmc_test = np.multiply( + np.atleast_3d(Z_test).swapaxes(1, 2), bcf_model.tau_hat_test +).sum(axis=2) +treatment_term_test = np.multiply(tau_test, Z_test).sum(axis=1) +treatment_term_mcmc_avg = np.squeeze(treatment_term_mcmc_test).mean( + axis=1, keepdims=True +) +mu_df_mcmc = pd.DataFrame( + np.concatenate( + (np.expand_dims(treatment_term_test, 1), treatment_term_mcmc_avg), axis=1 + ), + columns=["True treatment term", "Average estimated treatment term"], +) +sns.scatterplot( + data=mu_df_mcmc, x="True treatment term", y="Average estimated treatment term" +) +plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.show() +``` + +:::: + +### Prognostic Function + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot( + mu_test, rowMeans(bcf_model$mu_hat_test), + xlab = "True mu", ylab = "Average estimated mu" +) 
+abline(0, 1, col = "black", lty = 3) +``` + +## Python + +```{python} +forest_preds_mu_mcmc = bcf_model.mu_hat_test +mu_avg_mcmc = np.squeeze(forest_preds_mu_mcmc).mean(axis=1, keepdims=True) +mu_df_mcmc = pd.DataFrame( + np.concatenate((np.expand_dims(mu_test, 1), mu_avg_mcmc), axis=1), + columns=["True mu", "Average estimated mu"], +) +sns.scatterplot(data=mu_df_mcmc, x="True mu", y="Average estimated mu") +plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.show() +``` + +:::: + +### Global Error Variance + +::::{.panel-tabset group="language"} + +## R + +```{r} +plot( + bcf_model$sigma2_global_samples, + xlab = "Sample", ylab = expression(sigma^2) +) +``` + +## Python + +```{python} +sigma_df_mcmc = pd.DataFrame( + np.concatenate( + ( + np.expand_dims( + np.arange(bcf_model.num_samples - bcf_model.num_gfr), axis=1 + ), + np.expand_dims(bcf_model.global_var_samples[bcf_model.num_gfr:], axis=1), + ), + axis=1, + ), + columns=["Sample", "Sigma"], +) +sns.scatterplot(data=sigma_df_mcmc, x="Sample", y="Sigma") +plt.show() +``` + +:::: diff --git a/vignettes/ordinal-outcome.qmd b/vignettes/ordinal-outcome.qmd index c6bd9d32b..b92c34592 100644 --- a/vignettes/ordinal-outcome.qmd +++ b/vignettes/ordinal-outcome.qmd @@ -1,8 +1,21 @@ --- title: "Ordinal Outcome Modeling" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + This vignette demonstrates how to use BART to model ordinal outcomes with a complementary log-log (cloglog) link function (@alam2025unified). 
@@ -41,7 +54,9 @@ library(stochtree) ## Python ```{python} -# Python implementation coming soon +import numpy as np +import matplotlib.pyplot as plt +from stochtree import BARTModel, OutcomeModel ``` :::: @@ -105,7 +120,49 @@ y_test <- y[test_idx] ## Python ```{python} -# Python implementation coming soon +random_seed = 2026 +rng = np.random.default_rng(random_seed) + +# Sample size and number of predictors +n = 2000 +p = 5 + +# Design matrix and true lambda function +X = rng.standard_normal((n, p)) +beta = np.ones(p) / np.sqrt(p) +true_lambda = X @ beta + +# Set cutpoints for ordinal categories (3 categories: 1, 2, 3) +n_categories = 3 +gamma_true = np.array([-2.0, 1.0]) + +# True ordinal class probabilities +true_probs = np.zeros((n, n_categories)) +true_probs[:, 0] = 1 - np.exp(-np.exp(gamma_true[0] + true_lambda)) +for j in range(1, n_categories - 1): + true_probs[:, j] = ( + np.exp(-np.exp(gamma_true[j - 1] + true_lambda)) + * (1 - np.exp(-np.exp(gamma_true[j] + true_lambda))) + ) +true_probs[:, n_categories - 1] = 1 - true_probs[:, :-1].sum(axis=1) + +# Generate ordinal outcomes (1-indexed integers) +y = np.array( + [rng.choice(np.arange(1, n_categories + 1), p=true_probs[i]) for i in range(n)], + dtype=float, +) +unique, counts = np.unique(y, return_counts=True) +print("Outcome distribution:", dict(zip(unique.astype(int), counts))) + +# Train-test split +n_test = round(0.2 * n) +n_train = n - n_test +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_train = X[train_inds] +X_test = X[test_inds] +y_train = y[train_inds] +y_test = y[test_inds] ``` :::: @@ -149,7 +206,25 @@ bart_model <- bart( ## Python ```{python} -# Python implementation coming soon +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, + y_train=y_train, + X_test=X_test, + num_gfr=0, + num_burnin=1000, + num_mcmc=1000, + general_params={ + "num_threads": 1, + "cutpoint_grid_size": 100, + "sample_sigma2_global": False, + 
"keep_every": 1, + "num_chains": 1, + "random_seed": random_seed, + "outcome_model": OutcomeModel(outcome="ordinal", link="cloglog"), + }, + mean_forest_params={"num_trees": 50, "sample_sigma2_leaf": False}, +) ``` :::: @@ -190,7 +265,9 @@ est_probs_test <- predict( ## Python ```{python} -# Python implementation coming soon +# predict returns (n_obs, n_categories) posterior mean class probabilities +est_probs_train = bart_model.predict(X=X_train, scale="probability", terms="y_hat", type="mean") +est_probs_test = bart_model.predict(X=X_test, scale="probability", terms="y_hat", type="mean") ``` :::: @@ -239,7 +316,24 @@ abline(v = gamma_true[2], col = 'blue', lty = 3, lwd = 3) ## Python ```{python} -# Python implementation coming soon +# cutpoint_samples shape: (n_categories - 1, num_samples) +# shifted by per-sample mean of train predictions to remove non-identifiable intercept +cutpoint_samples = bart_model.extract_parameter("cloglog_cutpoints") +y_hat_train_post = bart_model.predict(X=X_train, scale="linear", terms="y_hat", type="posterior") +gamma1 = cutpoint_samples[0, :] + y_hat_train_post.mean(axis=0) +gamma2 = cutpoint_samples[1, :] + y_hat_train_post.mean(axis=0) + +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) +ax1.hist(gamma1, density=True, bins=40) +ax1.axvline(gamma_true[0], color="blue", linestyle="dotted", linewidth=2) +ax1.set_title("Posterior Distribution of Cutpoint 1") +ax1.set_xlabel("Cutpoint 1") +ax2.hist(gamma2, density=True, bins=40) +ax2.axvline(gamma_true[1], color="blue", linestyle="dotted", linewidth=2) +ax2.set_title("Posterior Distribution of Cutpoint 2") +ax2.set_xlabel("Cutpoint 2") +plt.tight_layout() +plt.show() ``` :::: @@ -308,7 +402,30 @@ text( ## Python ```{python} -# Python implementation coming soon +y_hat_train = bart_model.predict(X=X_train, scale="linear", terms="y_hat", type="mean") +y_hat_test = bart_model.predict(X=X_test, scale="linear", terms="y_hat", type="mean") +lambda_pred_train = y_hat_train - 
y_hat_train.mean() +lambda_pred_test = y_hat_test - y_hat_test.mean() +corr_train = np.corrcoef(true_lambda[train_inds], lambda_pred_train)[0, 1] +corr_test = np.corrcoef(true_lambda[test_inds], lambda_pred_test)[0, 1] + +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) +ax1.scatter(lambda_pred_train, true_lambda[train_inds], alpha=0.3, s=10) +ax1.axline((0, 0), slope=1, color="blue", linewidth=2) +ax1.set_title("Train Set: Predicted vs Actual") +ax1.set_xlabel("Predicted") +ax1.set_ylabel("Actual") +ax1.text(0.05, 0.95, f"Correlation: {corr_train:.3f}", transform=ax1.transAxes, + color="red", verticalalignment="top") +ax2.scatter(lambda_pred_test, true_lambda[test_inds], alpha=0.3, s=10) +ax2.axline((0, 0), slope=1, color="blue", linewidth=2) +ax2.set_title("Test Set: Predicted vs Actual") +ax2.set_xlabel("Predicted") +ax2.set_ylabel("Actual") +ax2.text(0.05, 0.95, f"Correlation: {corr_test:.3f}", transform=ax2.transAxes, + color="red", verticalalignment="top") +plt.tight_layout() +plt.show() ``` :::: @@ -345,7 +462,18 @@ for (j in 1:n_categories) { ## Python ```{python} -# Python implementation coming soon +fig, axes = plt.subplots(1, n_categories, figsize=(15, 5)) +for j in range(n_categories): + corr = np.corrcoef(true_probs[train_inds, j], est_probs_train[:, j])[0, 1] + axes[j].scatter(true_probs[train_inds, j], est_probs_train[:, j], alpha=0.3, s=10) + axes[j].axline((0, 0), slope=1, color="blue", linewidth=2) + axes[j].set_title(f"Training Set: True vs Estimated Probability, Class {j + 1}") + axes[j].set_xlabel("True Class Probability") + axes[j].set_ylabel("Estimated Class Probability") + axes[j].text(0.05, 0.95, f"Correlation: {corr:.3f}", transform=axes[j].transAxes, + color="red", verticalalignment="top") +plt.tight_layout() +plt.show() ``` :::: @@ -381,7 +509,18 @@ for (j in 1:n_categories) { ## Python ```{python} -# Python implementation coming soon +fig, axes = plt.subplots(1, n_categories, figsize=(15, 5)) +for j in range(n_categories): + corr = 
np.corrcoef(true_probs[test_inds, j], est_probs_test[:, j])[0, 1] + axes[j].scatter(true_probs[test_inds, j], est_probs_test[:, j], alpha=0.3, s=10) + axes[j].axline((0, 0), slope=1, color="blue", linewidth=2) + axes[j].set_title(f"Test Set: True vs Estimated Probability, Class {j + 1}") + axes[j].set_xlabel("True Class Probability") + axes[j].set_ylabel("Estimated Class Probability") + axes[j].text(0.05, 0.95, f"Correlation: {corr:.3f}", transform=axes[j].transAxes, + color="red", verticalalignment="top") +plt.tight_layout() +plt.show() ``` :::: diff --git a/vignettes/prior-calibration.qmd b/vignettes/prior-calibration.qmd index 267200afa..cb6be2cb2 100644 --- a/vignettes/prior-calibration.qmd +++ b/vignettes/prior-calibration.qmd @@ -1,8 +1,21 @@ --- title: "Prior Calibration" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + This vignette demonstrates prior calibration approaches for the parametric components of stochastic tree ensembles (@chipman2010bart). @@ -42,13 +55,59 @@ calibrated as follows: 2. $\lambda$ is chosen to ensure that $p(\sigma^2 < \hat{\sigma}^2) = q$ for some value $q$, typically set to a default value of 0.9. -This is done in `stochtree` via the `calibrateInverseGammaErrorVariance` function. 
+# Setup + +Load the necessary packages + +:::{.panel-tabset group="language"} + +## R ```{r} -# Load library +#| message: false library(stochtree) +``` + +## Python + +```{python} +import numpy as np +import matplotlib.pyplot as plt +from stochtree import BARTModel, calibrate_global_error_variance +``` + +::: + +Set a seed for reproducibility + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| message: false +random_seed <- 1234 +set.seed(random_seed) +``` -# Generate data +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +::: + +# Data Generation + +Generate data for a straightforward supervised learning problem + +::::{.panel-tabset group="language"} + +## R + +```{r} n <- 500 p <- 5 X <- matrix(runif(n*p), ncol = p) @@ -60,8 +119,33 @@ f_XW <- ( ) noise_sd <- 1 y <- f_XW + rnorm(n, 0, noise_sd) +``` + +## Python + +```{python} +n = 500 +p = 5 +X = rng.uniform(size=(n, p)) +f_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * (-7.5) + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (-2.5) + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * (2.5) + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * (7.5) +) +noise_sd = 1.0 +y = f_XW + rng.normal(0, noise_sd, n) +``` + +:::: + +Split into train and test set + +::::{.panel-tabset group="language"} -# Test/train split +## R + +```{r} test_set_pct <- 0.2 n_test <- round(test_set_pct*n) n_train <- n - n_test @@ -71,13 +155,51 @@ X_test <- X[test_inds,] X_train <- X[train_inds,] y_test <- y[test_inds] y_train <- y[train_inds] +``` + +## Python + +```{python} +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +n_train = n - n_test +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test = X[test_inds] +X_train = X[train_inds] +y_test = y[test_inds] +y_train = y[train_inds] +``` + +:::: + +# Model Sampling + +First, we calibrate the scale parameter for the variance term as in Chipman et al (2010) -# Calibrate the scale parameter for the 
variance term as in Chipman et al (2010) +::::{.panel-tabset group="language"} + +## R + +```{r} nu <- 3 lambda <- calibrateInverseGammaErrorVariance(y_train, X_train, nu = nu) ``` -Now we run a BART model with this variance parameterization +## Python + +```{python} +nu = 3 +lambda_ = calibrate_global_error_variance(X_train, y_train, nu=nu) +``` + +:::: + +Then, we run a BART model with this variance parameterization + +::::{.panel-tabset group="language"} + +## R ```{r} general_params <- list(sigma2_global_shape = nu/2, sigma2_global_scale = (nu*lambda)/2) @@ -86,18 +208,70 @@ bart_model <- bart(X_train = X_train, y_train = y_train, X_test = X_test, general_params = general_params) ``` +## Python + +```{python} +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=0, num_burnin=1000, num_mcmc=100, + general_params={ + "num_threads": 1, + "sigma2_global_shape": nu / 2, + "sigma2_global_scale": (nu * lambda_) / 2, + }, +) +``` + +:::: + Inspect the out-of-sample predictions of the model +::::{.panel-tabset group="language"} + +## R + ```{r} plot(rowMeans(bart_model$y_hat_test), y_test, xlab = "predicted", ylab = "actual") abline(0,1,col="red",lty=3,lwd=3) ``` +## Python + +```{python} +pred_mean = bart_model.y_hat_test.mean(axis=1) +lo = min(pred_mean.min(), y_test.min()) +hi = max(pred_mean.max(), y_test.max()) +plt.scatter(pred_mean, y_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.show() +``` + +:::: + Inspect the posterior samples of $\sigma^2$ +::::{.panel-tabset group="language"} + +## R + ```{r} plot(bart_model$sigma2_global_samples, ylab = "sigma^2", xlab = "iteration") abline(h = noise_sd^2, col = "red", lty = 3, lwd = 3) ``` +## Python + +```{python} +plt.plot(bart_model.global_var_samples) +plt.xlabel("Iteration") +plt.ylabel(r"$\sigma^2$") +plt.axhline(noise_sd**2, color="red", linestyle="dashed", 
linewidth=2) +plt.show() +``` + +:::: + # References diff --git a/vignettes/rdd.qmd b/vignettes/rdd.qmd index 6df9bec85..ba4c959f1 100644 --- a/vignettes/rdd.qmd +++ b/vignettes/rdd.qmd @@ -8,7 +8,9 @@ author: - name: Drew Herren affiliation: University of Texas at Austin date: today -bibliography: R/RDD/rdd.bib +bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- ::: {.hidden} @@ -30,12 +32,15 @@ $$ ```{r} #| include: false reticulate::use_python( - Sys.getenv("RETICULATE_PYTHON", unset = Sys.which("python3")), + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), required = TRUE ) ``` -## Introduction +# Introduction We study conditional average treatment effect (CATE) estimation for regression discontinuity designs (RDD), in which treatment assignment is based on whether a @@ -50,7 +55,7 @@ provided the conditional expectation $E[Y \mid X,W]$ is continuous at that point _all_ $W=w$. We exploit this assumption with the leaf regression BART model implemented in stochtree, which allows us to define an explicit prior on the CATE. -## Regression Discontinuity Design +# Regression Discontinuity Design We conceptualize the treatment effect estimation problem via a quartet of random variables $(Y, X, Z, U)$. The variable $Y$ is the outcome variable; $X$ is the running @@ -82,7 +87,7 @@ violated, so the overall ATE $\bar{\tau} = E(\tau(X))$ is unidentified. We inste estimate $\tau(0) = \mu_1(0) - \mu_0(0)$, which is identified for continuous $X$ under the assumption that $\mu_1$ and $\mu_0$ are suitably smooth at $x = 0$. -### Conditional Average Treatment Effects in RDD +## Conditional Average Treatment Effects in RDD We are concerned with learning not only $\tau(0)$ but also RDD CATEs, $\tau(0, \w)$ for covariate vector $\w$. 
Defining potential outcome means @@ -96,7 +101,7 @@ must assume $\mu_1(x,\w)$ and $\mu_0(x,\w)$ are suitably smooth in $x$ for every CATE estimation in RDDs then reduces to estimating $E[Y \mid X=x, W=\w, Z=z]$, for which we turn to BART. -## The BARDDT Model +# The BARDDT Model We propose a BART model where the trees split on $(x,\w)$ but each leaf node parameter is a vector of regression coefficients tailored to the RDD context. Let $\psi$ denote @@ -139,12 +144,14 @@ variable, there is no need to separately define a bandwidth as in polynomial RDD regression trees automatically determine (in the course of posterior sampling) when to prune away regions far from the cutoff. -## Demo +# Demo In this section, we provide code for implementing BARDDT in `stochtree` on a popular RDD dataset. -### Load Libraries +## Setup + +Load the necessary packages :::{.panel-tabset group="language"} @@ -172,7 +179,28 @@ from stochtree import BARTModel ::: -### Dataset +Set a seed for reproducibility + +:::{.panel-tabset group="language"} + +## R + +```{r} +#| message: false +random_seed <- 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +::: + +## Dataset The data comes from @lindo2010ability, who analyze data on college students at a large Canadian university to evaluate an academic probation policy. Students whose GPA falls @@ -190,21 +218,36 @@ quantile (`hsgrade_pct`). 
## R ```{r} +# Load and organize data data <- read.csv("https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv") y <- data$nextGPA x <- data$X +n <- nrow(data) + +# Standardize x x <- x / sd(x) + +# Extract covariates w <- data[, 4:11] + +# Encode categorical features as ordered/unordered factors w$totcredits_year1 <- factor(w$totcredits_year1, ordered = TRUE) w$male <- factor(w$male, ordered = FALSE) w$bpl_north_america <- factor(w$bpl_north_america, ordered = FALSE) w$loc_campus1 <- factor(w$loc_campus1, ordered = FALSE) w$loc_campus2 <- factor(w$loc_campus2, ordered = FALSE) w$loc_campus3 <- factor(w$loc_campus3, ordered = FALSE) + +# x is normalized so the cutoff occurs at c = 0 c <- 0 -n <- nrow(data) + +# Binarize the running variable into a "treatment" indicator z <- as.numeric(x > c) + +# Window for prediction sample h <- 0.1 + +# Define the prediction subset test <- -h < x & x < h ntest <- sum(test) ``` @@ -212,31 +255,49 @@ ntest <- sum(test) ## Python ```{python} +# Load and organize data data = pd.read_csv("https://raw.githubusercontent.com/rdpackages-replication/CIT_2024_CUP/refs/heads/main/CIT_2024_CUP_discrete.csv") -y = data.loc[:, "nextGPA"].to_numpy() -x = data.loc[:, "X"].to_numpy() +y = data.loc[:, "nextGPA"].to_numpy().squeeze() +x = data.loc[:, "X"].to_numpy().squeeze() +n = data.shape[0] + +# Standardize x x = x / np.std(x) + +# Extract covariates w = data.iloc[:, 3:11] -ordered_cat = pd.api.types.CategoricalDtype(ordered=True) -unordered_cat = pd.api.types.CategoricalDtype(ordered=False) -w.loc[:, "totcredits_year1"] = w.loc[:, "totcredits_year1"].astype(ordered_cat) -w.loc[:, "male"] = w.loc[:, "male"].astype(unordered_cat) -w.loc[:, "bpl_north_america"] = w.loc[:, "bpl_north_america"].astype(unordered_cat) -w.loc[:, "loc_campus1"] = w.loc[:, "loc_campus1"].astype(unordered_cat) -w.loc[:, "loc_campus2"] = w.loc[:, "loc_campus2"].astype(unordered_cat) -w.loc[:, "loc_campus3"] = 
w.loc[:, "loc_campus3"].astype(unordered_cat) +# Encode categorical features as ordered/unordered factors +w["totcredits_year1"] = pd.Categorical( + w["totcredits_year1"], ordered=True +) +unordered_categorical_cols = [ + "male", + "bpl_north_america", + "loc_campus1", + "loc_campus2", + "loc_campus3", +] +for col in unordered_categorical_cols: + w.loc[:, col] = pd.Categorical(w.loc[:, col], ordered=False) + +# x is normalized so the cutoff occurs at c = 0 c = 0 -n = data.shape[0] -z = np.where(x > c, 1.0, 0.0) + +# Binarize the running variable into a "treatment" indicator +z = (x > c).astype(float) + +# Window for prediction sample h = 0.1 -test = (x > -h) & (x < h) -ntest = int(np.sum(test)) + +# Define the prediction subset +test = (-h < x) & (x < h) +ntest = np.sum(test) ``` ::: -### Target Estimand +## Target Estimand Our estimand is the CATE function at $x = 0$, i.e. $\tau(0, \w)$. To focus on feasible estimation points, we restrict to observed $\w_i$ such that $|x_i| \leq \delta$ @@ -246,7 +307,7 @@ $$ \tau(0, \w_i) \quad \forall i \text{ such that } |x_i| \leq \delta. 
$$ -### Implementing BARDDT +## Implementing BARDDT The $\psi$ basis vector for the leaf regression is $\psi = [1,\, zx,\, (1-z)x,\, z]$, and the training covariate matrix is @@ -258,109 +319,144 @@ $$ :::{.panel-tabset group="language"} +Define basis functions for model sampling + ## R ```{r} -fit_barddt <- function(y, x, w, z, test, c, - num_gfr = 2, num_mcmc = 500) { - n <- length(y) - barddt_global <- list(standardize = TRUE, - sample_sigma_global = TRUE, - sigma2_global_init = 0.1) - barddt_mean <- list(num_trees = 50, min_samples_leaf = 20, - alpha = 0.95, beta = 2, max_depth = 20, - sample_sigma2_leaf = FALSE, - sigma2_leaf_init = diag(rep(0.1 / 50, 4))) - B <- cbind(rep(1, n), z * x, (1 - z) * x, z) - B1 <- cbind(rep(1, n), rep(c, n), rep(0, n), rep(1, n))[test, ] - B0 <- cbind(rep(1, n), rep(0, n), rep(c, n), rep(0, n))[test, ] - Xmat <- as.matrix(cbind(rep(0, n), w))[test, ] - fit <- stochtree::bart( - X_train = as.matrix(cbind(x, w)), y_train = y, - leaf_basis_train = B, - mean_forest_params = barddt_mean, - general_params = barddt_global, - num_gfr = num_gfr, num_mcmc = num_mcmc - ) - pred1 <- predict(fit, Xmat, B1)$y_hat - pred0 <- predict(fit, Xmat, B0)$y_hat - pred1 - pred0 -} +Psi <- cbind(rep(1, n), z * x, (1 - z) * x, z) ``` ## Python ```{python} -def estimate_barddt(y, x, w, z, test, c, - num_gfr=2, num_mcmc=100, seed=None): - n = y.shape[0] - global_params = {"standardize": True, - "sample_sigma_global": True, - "sigma2_global_init": 0.1} - if seed is not None: - global_params["random_seed"] = seed - mean_params = {"num_trees": 50, "min_samples_leaf": 20, - "alpha": 0.95, "beta": 2, "max_depth": 20, - "sample_sigma2_leaf": False, - "sigma2_leaf_init": np.diag(np.repeat(0.1 / 150, 4))} - Psi = np.column_stack([np.ones(n), z * x, (1 - z) * x, z]) - Psi1 = np.column_stack([np.ones(n), np.repeat(c, n), - np.zeros(n), np.ones(n)])[test, :] - Psi0 = np.column_stack([np.ones(n), np.zeros(n), - np.repeat(c, n), np.zeros(n)])[test, :] - Xmat = 
np.column_stack([np.zeros(n), w])[test, :] - model = BARTModel() - model.sample(X_train=np.column_stack([x, w]), y_train=y, - leaf_basis_train=Psi, num_gfr=num_gfr, num_mcmc=num_mcmc, - general_params=global_params, mean_forest_params=mean_params) - return model.predict(Xmat, Psi1) - model.predict(Xmat, Psi0) +Psi = np.c_[np.ones(n), z * x, (1 - z) * x, z] ``` ::: -### Fitting the Model +## Fitting the Model -We run multiple chains and combine their posterior draws. +We run multiple chains and combine their posterior draws. To compute the CATE posterior, we obtain $Y(z)$ predictions by predicting from the model with $Z = z$ set in the basis. `stochtree` provides a function / method (`computeContrastBARTModel` in R, `compute_contrast` in Python) for directly computing this contrast from a sampled BART model. :::{.panel-tabset group="language"} ## R ```{r} -#| cache: true -num_chains <- 20 -num_gfr <- 2 -num_mcmc <- 500 - -ncores <- min(5, parallel::detectCores() - 1) -cl <- makeCluster(ncores) -registerDoParallel(cl) - -chain_outputs <- foreach(i = seq_len(num_chains)) %dopar% { - fit_barddt(y, x, w, z, test, c, - num_gfr = num_gfr, num_mcmc = num_mcmc) -} -stopCluster(cl) +# Define sampling parameters +num_chains <- 4 +num_gfr <- 4 +num_burnin <- 0 +num_mcmc <- 500 + +# Parameter lists for BART model fit +global_params <- list( + standardize = T, + sample_sigma_global = TRUE, + sigma2_global_init = 0.1, + random_seed = random_seed, + num_threads = 1, + num_chains = num_chains +) +forest_params <- list( + num_trees = 50, + min_samples_leaf = 20, + alpha = 0.95, + beta = 2, + max_depth = 20, + sample_sigma2_leaf = FALSE, + sigma2_leaf_init = 0.1 / 50 +) + +# Fit the BART model +bart_model <- bart( + X_train = cbind(x, w), + leaf_basis_train = Psi, + y_train = y, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = global_params, + mean_forest_params = forest_params +) -pred <- do.call("cbind", chain_outputs) +# Compute the CATE 
posterior +Psi0 <- cbind(rep(1, n), rep(0, n), rep(0, n), rep(0, n))[test, ] +Psi1 <- cbind(rep(1, n), rep(0, n), rep(0, n), rep(1, n))[test, ] +covariates_test <- cbind(x = rep(0, n), w)[test, ] +cate_posterior <- computeContrastBARTModel( + bart_model, + X_0 = covariates_test, + X_1 = covariates_test, + leaf_basis_0 = Psi0, + leaf_basis_1 = Psi1, + type = "posterior", + scale = "linear" +) ``` ## Python ```{python} -#| cache: true -num_chains <- 4 -num_mcmc <- 100 -cate_result = np.empty((ntest, num_chains * num_mcmc)) -for i in range(num_chains): - draws = estimate_barddt(y, x, w, z, test, c, - num_gfr=2, num_mcmc=num_mcmc, seed=i) - cate_result[:, (i * num_mcmc):((i + 1) * num_mcmc)] = draws +# Define sampling parameters +num_chains = 4 +num_gfr = 4 +num_burnin = 0 +num_mcmc = 500 + +# Parameter lists for BART model fit +global_params = { + "standardize": True, + "sample_sigma_global": True, + "sigma2_global_init": 0.1, + "random_seed": random_seed, + "num_threads": 1, + "num_chains": num_chains +} +forest_params = { + "num_trees": 50, + "min_samples_leaf": 20, + "alpha": 0.95, + "beta": 2, + "max_depth": 20, + "sample_sigma2_leaf": False, + "sigma2_leaf_init": 0.1 / 50, +} + +# Fit the BART model +covariates_train = w +covariates_train.loc[:, "x"] = x +bart_model = BARTModel() +bart_model.sample( + X_train=covariates_train, + leaf_basis_train=Psi, + y_train=y, + num_gfr=num_gfr, + num_burnin=num_burnin, + num_mcmc=num_mcmc, + general_params=global_params, + mean_forest_params=forest_params, +) + +# Compute the CATE posterior +Psi0 = np.c_[np.ones(n), np.zeros(n), np.zeros(n), np.zeros(n)][test, :] +Psi1 = np.c_[np.ones(n), np.zeros(n), np.zeros(n), np.ones(n)][test, :] +covariates_test = w.iloc[test, :] +covariates_test.loc[:, "x"] = np.zeros(ntest) +cate_posterior = bart_model.compute_contrast( + X_0=covariates_test, + X_1=covariates_test, + leaf_basis_0=Psi0, + leaf_basis_1=Psi1, + type="posterior", + scale="linear", +) ``` ::: -### Analyzing CATE 
Heterogeneity +## Analyzing CATE Heterogeneity To summarize the CATE posterior we fit a regression tree to the posterior mean point estimates $\bar{\tau}_i = \frac{1}{M} \sum_{h=1}^M \tau^{(h)}(0, \w_i)$, @@ -372,7 +468,7 @@ using $W$ as predictors. We restrict to observations with $|x_i| \leq \delta$. ```{r} #| fig-cap: "Regression tree fit to posterior point estimates of individual treatment effects. Top number in each box is the average subgroup treatment effect; lower number is the share of the sample." -cate <- rpart(y ~ ., data.frame(y = rowMeans(pred), w[test, ]), +cate <- rpart(y ~ ., data.frame(y = rowMeans(cate_posterior), w[test, ]), control = rpart.control(cp = 0.015)) plot_cart <- function(rp) { @@ -392,9 +488,11 @@ rpart.plot(cate, main = "", box.col = plot_cart(cate)) ```{python} #| fig-cap: "Decision tree fit to posterior mean CATEs, used as an effect moderation summary." -y_surrogate = np.mean(cate_result, axis=1) +y_surrogate = np.mean(cate_posterior, axis=1) X_surrogate = w.iloc[test, :] -cate_tree = DecisionTreeRegressor(min_impurity_decrease=0.0001) +cp = 0.015 +min_impurity_decrease = cp * np.var(y_surrogate) +cate_tree = DecisionTreeRegressor(min_impurity_decrease=min_impurity_decrease) cate_tree.fit(X=X_surrogate, y=y_surrogate) plot_tree(cate_tree, impurity=False, filled=True, feature_names=w.columns, proportion=False, @@ -408,7 +506,7 @@ The resulting tree indicates that course load (`totcredits_year1`) in the academ leading to probation is a strong moderator of the treatment effect. The tree also flags campus, age at entry, and gender as secondary moderators — all prima facie plausible. -### Comparing Subgroup Posteriors +## Comparing Subgroup Posteriors The effect moderation tree is a posterior summary tool; it does not alter the posterior itself. 
We can compare any two subgroups by averaging their individual @@ -440,7 +538,7 @@ cate_kde <- function(rp, pred) { cate_b <- colMeans(pred[right, , drop = FALSE]) MASS::kde2d(cate_a, cate_b, n = 200) } -contour(cate_kde(cate, pred), bty = "n", +contour(cate_kde(cate, cate_posterior), bty = "n", xlab = "Group A", ylab = "Group B") abline(a = 0, b = 1) ``` @@ -450,11 +548,13 @@ abline(a = 0, b = 1) ```{python} #| fig-cap: "Joint KDE of Group A and Group B CATE posteriors. Contours above the diagonal indicate Group B has persistently higher treatment effects." predicted_nodes = cate_tree.apply(X=X_surrogate) -posterior_group_a = np.mean(cate_result[predicted_nodes == 2, :], axis=0) -posterior_group_b = np.mean(cate_result[predicted_nodes == 6, :], axis=0) +max_value_node = np.argmax(cate_tree.tree_.value) +min_value_node = np.argmin(cate_tree.tree_.value) +posterior_group_a = np.mean(cate_posterior[predicted_nodes == min_value_node, :], axis=0) +posterior_group_b = np.mean(cate_posterior[predicted_nodes == max_value_node, :], axis=0) posterior_df = pd.DataFrame({"group_a": posterior_group_a, - "group_b": posterior_group_b}) -sns.kdeplot(data=posterior_df, x="group_b", y="group_a") + "group_b": posterior_group_b}) +sns.kdeplot(data=posterior_df, x="group_a", y="group_b") plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) plt.show() ``` @@ -469,4 +569,4 @@ As always, CATEs that vary with observable factors do not necessarily represent _causal_ moderating relationship; uncovering these patterns is crucial for suggesting causal mechanisms to investigate in future studies. 
-## References +# References diff --git a/vignettes/serialization.qmd b/vignettes/serialization.qmd index 5ab4f416d..f92b4a5aa 100644 --- a/vignettes/serialization.qmd +++ b/vignettes/serialization.qmd @@ -1,14 +1,28 @@ --- title: "Model Serialization" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + This vignette demonstrates how to serialize ensemble models to JSON files and deserialize back to an R or Python session, where the forests and other parameters can be used for prediction and further analysis. -We also define several simple helper functions used in the data generating processes -below. +# Setup + +Load necessary packages ::::{.panel-tabset group="language"} @@ -16,6 +30,28 @@ below. ```{r} library(stochtree) +``` + +## Python + +```{python} +import json +import numpy as np +import matplotlib.pyplot as plt +import pandas as pd +from scipy.stats import norm +from stochtree import BARTModel, BCFModel +``` + +:::: + +Define several simple helper functions used in the data generating processes below + +::::{.panel-tabset group="language"} + +## R + +```{r} g <- function(x) {ifelse(x[,5]==1,2,ifelse(x[,5]==2,-1,-4))} mu1 <- function(x) {1+g(x)+x[,1]*x[,3]} mu2 <- function(x) {1+g(x)+6*abs(x[,3]-1)} @@ -26,49 +62,61 @@ tau2 <- function(x) {1+2*x[,2]*x[,4]} ## Python ```{python} -# Python implementation coming soon +def g(x): return np.where(x[:,4]==1, 2, np.where(x[:,4]==2, -1, -4)) +def mu1(x): return 1 + g(x) + x[:,0] * x[:,2] +def mu2(x): return 1 + g(x) + 6 * np.abs(x[:,2] - 1) +def tau1(x): return np.full(x.shape[0], 3.0) +def tau2(x): return 1 + 2 * x[:,1] * x[:,3] ``` :::: -# Demo 1: Bayesian Causal Forest (BCF) +Set a seed for reproducibility -BCF models are initially sampled and constructed using the `bcf()` function. 
+::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed = 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +:::: + +# BART Serialization + +BART models are initially sampled and constructed using the `bart()` function. Here we show how to save and reload models from JSON files on disk. ## Model Building -Draw from a modified version of the data generating process defined in -@hahn2020bayesian. +Draw from a relatively straightforward heteroskedastic supervised learning DGP. ::::{.panel-tabset group="language"} ## R ```{r} -# Generate synthetic data -n <- 1000 -snr <- 2 -x1 <- rnorm(n) -x2 <- rnorm(n) -x3 <- rnorm(n) -x4 <- as.numeric(rbinom(n,1,0.5)) -x5 <- as.numeric(sample(1:3,n,replace=TRUE)) -X <- cbind(x1,x2,x3,x4,x5) -p <- ncol(X) -mu_x <- mu1(X) -tau_x <- tau2(X) -pi_x <- 0.8*pnorm((3*mu_x/sd(mu_x)) - 0.5*X[,1]) + 0.05 + runif(n)/10 -Z <- rbinom(n,1,pi_x) -E_XZ <- mu_x + Z*tau_x -rfx_group_ids <- rep(c(1,2), n %/% 2) -rfx_coefs <- matrix(c(-1, -1, 1, 1), nrow=2, byrow=TRUE) -rfx_basis <- cbind(1, runif(n, -1, 1)) -rfx_term <- rowSums(rfx_coefs[rfx_group_ids,] * rfx_basis) -y <- E_XZ + rfx_term + rnorm(n, 0, 1)*(sd(E_XZ)/snr) -X <- as.data.frame(X) -X$x4 <- factor(X$x4, ordered = TRUE) -X$x5 <- factor(X$x5, ordered = TRUE) +# Generate the data +n <- 500 +p_x <- 10 +X <- matrix(runif(n*p_x), ncol = p_x) +f_XW <- 0 +s_XW <- ( + ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5*X[,3]) + + ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1*X[,3]) + + ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2*X[,3]) + + ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) +) +y <- f_XW + rnorm(n, 0, 1)*s_XW # Split data into test and train sets test_set_pct <- 0.2 @@ -76,35 +124,43 @@ n_test <- round(test_set_pct*n) n_train <- n - n_test test_inds <- sort(sample(1:n, n_test, replace = FALSE)) train_inds <- (1:n)[!((1:n) %in% test_inds)] -X_test <- X[test_inds,] -X_train <- X[train_inds,] -pi_test <- pi_x[test_inds] 
-pi_train <- pi_x[train_inds] -Z_test <- Z[test_inds] -Z_train <- Z[train_inds] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) y_test <- y[test_inds] y_train <- y[train_inds] -mu_test <- mu_x[test_inds] -mu_train <- mu_x[train_inds] -tau_test <- tau_x[test_inds] -tau_train <- tau_x[train_inds] -rfx_group_ids_test <- rfx_group_ids[test_inds] -rfx_group_ids_train <- rfx_group_ids[train_inds] -rfx_basis_test <- rfx_basis[test_inds,] -rfx_basis_train <- rfx_basis[train_inds,] -rfx_term_test <- rfx_term[test_inds] -rfx_term_train <- rfx_term[train_inds] +s_x_test <- s_XW[test_inds] +s_x_train <- s_XW[train_inds] ``` ## Python ```{python} -# Python implementation coming soon +# Note: new rng here so Python Demo 2 is independent of Demo 1 +rng2 = np.random.default_rng(5678) + +n = 500 +p_x = 10 +X2 = rng2.uniform(size=(n, p_x)) +s_XW = ( + ((X2[:, 0] >= 0) & (X2[:, 0] < 0.25)) * (0.5 * X2[:, 2]) + + ((X2[:, 0] >= 0.25) & (X2[:, 0] < 0.5)) * (1.0 * X2[:, 2]) + + ((X2[:, 0] >= 0.5) & (X2[:, 0] < 0.75)) * (2.0 * X2[:, 2]) + + ((X2[:, 0] >= 0.75) & (X2[:, 0] < 1.0)) * (3.0 * X2[:, 2]) +) +y2 = rng2.standard_normal(n) * s_XW + +n_test2 = round(0.2 * n) +test_inds2 = rng2.choice(n, n_test2, replace=False) +train_inds2 = np.setdiff1d(np.arange(n), test_inds2) +X_test2 = pd.DataFrame(X2[test_inds2]) +X_train2 = pd.DataFrame(X2[train_inds2]) +y_test2 = y2[test_inds2] +y_train2 = y2[train_inds2] ``` :::: -Sample a BCF model. +Sample a BART model. ::::{.panel-tabset group="language"} @@ -114,63 +170,80 @@ Sample a BCF model. 
num_gfr <- 10 num_burnin <- 0 num_mcmc <- 100 -prognostic_forest_params <- list(sample_sigma2_leaf = F) -treatment_effect_forest_params <- list(sample_sigma2_leaf = F) -bcf_model <- bcf( - X_train = X_train, Z_train = Z_train, y_train = y_train, propensity_train = pi_train, - rfx_group_ids_train = rfx_group_ids_train, rfx_basis_train = rfx_basis_train, - X_test = X_test, Z_test = Z_test, propensity_test = pi_test, - rfx_group_ids_test = rfx_group_ids_test, rfx_basis_test = rfx_basis_test, +general_params <- list(sample_sigma2_global = F) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 100, + alpha = 0.95, beta = 2, min_samples_leaf = 5) +variance_forest_params <- list(num_trees = 50, alpha = 0.95, + beta = 1.25, min_samples_leaf = 1) +bart_model <- stochtree::bart( + X_train = X_train, y_train = y_train, X_test = X_test, num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - prognostic_forest_params = prognostic_forest_params, - treatment_effect_forest_params = treatment_effect_forest_params + general_params = general_params, mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params ) ``` ## Python ```{python} -# Python implementation coming soon +num_gfr = 10 +num_burnin = 0 +num_mcmc = 100 +bart_model = BARTModel() +bart_model.sample( + X_train=X_train2, y_train=y_train2, X_test=X_test2, + num_gfr=num_gfr, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1, "sample_sigma2_global": False}, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 100, + "alpha": 0.95, "beta": 2.0, "min_samples_leaf": 5}, + variance_forest_params={"num_trees": 50, "alpha": 0.95, + "beta": 1.25, "min_samples_leaf": 1}, +) ``` :::: ## Serialization -Save the BCF model to disk. +Save the BART model to disk. 
::::{.panel-tabset group="language"} ## R ```{r} -saveBCFModelToJsonFile(bcf_model, "bcf.json") +saveBARTModelToJsonFile(bart_model, "bart_r.json") ``` ## Python ```{python} -# Python implementation coming soon +bart_json_string = bart_model.to_json() +with open("bart_py.json", "w") as f: + json.dump(json.loads(bart_json_string), f) ``` :::: ## Deserialization -Reload the BCF model from disk. +Reload the BART model from disk. ::::{.panel-tabset group="language"} ## R ```{r} -bcf_model_reload <- createBCFModelFromJsonFile("bcf.json") +bart_model_reload <- createBARTModelFromJsonFile("bart_r.json") ``` ## Python ```{python} -# Python implementation coming soon +with open("bart_py.json", "r") as f: + bart_json_reload = json.dumps(json.load(f)) +bart_model_reload = BARTModel() +bart_model_reload.from_json(bart_json_reload) ``` :::: @@ -182,52 +255,85 @@ Check that the predictions align with those of the original model. ## R ```{r} -bcf_preds_reload <- predict(bcf_model_reload, X_train, Z_train, pi_train, rfx_group_ids_train, rfx_basis_train) -plot(rowMeans(bcf_model$mu_hat_train), rowMeans(bcf_preds_reload$mu_hat), - xlab = "Original", ylab = "Deserialized", main = "Prognostic forest") -abline(0,1,col="red",lwd=3,lty=3) -plot(rowMeans(bcf_model$tau_hat_train), rowMeans(bcf_preds_reload$tau_hat), - xlab = "Original", ylab = "Deserialized", main = "Treatment forest") +bart_preds_reload <- predict(bart_model_reload, X_train) +plot(rowMeans(bart_model$y_hat_train), rowMeans(bart_preds_reload$y_hat), + xlab = "Original", ylab = "Deserialized", main = "Conditional Mean Estimates") abline(0,1,col="red",lwd=3,lty=3) -plot(rowMeans(bcf_model$y_hat_train), rowMeans(bcf_preds_reload$y_hat), - xlab = "Original", ylab = "Deserialized", main = "Overall outcome") +plot(rowMeans(bart_model$sigma2_x_hat_train), rowMeans(bart_preds_reload$variance_forest_predictions), + xlab = "Original", ylab = "Deserialized", main = "Conditional Variance Estimates") abline(0,1,col="red",lwd=3,lty=3) 
``` ## Python ```{python} -# Python implementation coming soon +bart_preds_orig = bart_model.predict(X=X_train2, terms=["y_hat", "variance_forest"]) +bart_preds_reload = bart_model_reload.predict(X=X_train2, terms=["y_hat", "variance_forest"]) + +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5)) +yhat_orig = bart_preds_orig["y_hat"].mean(axis=1) +yhat_reload = bart_preds_reload["y_hat"].mean(axis=1) +lo, hi = min(yhat_orig.min(), yhat_reload.min()), max(yhat_orig.max(), yhat_reload.max()) +ax1.scatter(yhat_orig, yhat_reload, alpha=0.4, s=10) +ax1.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +ax1.set_xlabel("Original") +ax1.set_ylabel("Deserialized") +ax1.set_title("Conditional Mean Estimates") + +# multi-term predict returns variance forest under "variance_forest_predictions" +vhat_orig = bart_preds_orig["variance_forest_predictions"].mean(axis=1) +vhat_reload = bart_preds_reload["variance_forest_predictions"].mean(axis=1) +lo, hi = min(vhat_orig.min(), vhat_reload.min()), max(vhat_orig.max(), vhat_reload.max()) +ax2.scatter(vhat_orig, vhat_reload, alpha=0.4, s=10) +ax2.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +ax2.set_xlabel("Original") +ax2.set_ylabel("Deserialized") +ax2.set_title("Conditional Variance Estimates") + +plt.tight_layout() +plt.show() ``` :::: -# Demo 2: BART +# Bayesian Causal Forest (BCF) Serialization -BART models are initially sampled and constructed using the `bart()` function. +BCF models are initially sampled and constructed using the `bcf()` function. Here we show how to save and reload models from JSON files on disk. ## Model Building -Draw from a relatively straightforward heteroskedastic supervised learning DGP. +Draw from a modified version of the data generating process defined in +@hahn2020bayesian. 
::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data -n <- 500 -p_x <- 10 -X <- matrix(runif(n*p_x), ncol = p_x) -f_XW <- 0 -s_XW <- ( - ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5*X[,3]) + - ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1*X[,3]) + - ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2*X[,3]) + - ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) -) -y <- f_XW + rnorm(n, 0, 1)*s_XW +# Generate synthetic data +n <- 1000 +snr <- 2 +x1 <- rnorm(n) +x2 <- rnorm(n) +x3 <- rnorm(n) +x4 <- as.numeric(rbinom(n,1,0.5)) +x5 <- as.numeric(sample(1:3,n,replace=TRUE)) +X <- cbind(x1,x2,x3,x4,x5) +p <- ncol(X) +mu_x <- mu1(X) +tau_x <- tau2(X) +pi_x <- 0.8*pnorm((3*mu_x/sd(mu_x)) - 0.5*X[,1]) + 0.05 + runif(n)/10 +Z <- rbinom(n,1,pi_x) +E_XZ <- mu_x + Z*tau_x +rfx_group_ids <- rep(c(1,2), n %/% 2) +rfx_coefs <- matrix(c(-1, -1, 1, 1), nrow=2, byrow=TRUE) +rfx_basis <- cbind(1, runif(n, -1, 1)) +rfx_term <- rowSums(rfx_coefs[rfx_group_ids,] * rfx_basis) +y <- E_XZ + rfx_term + rnorm(n, 0, 1)*(sd(E_XZ)/snr) +X <- as.data.frame(X) +X$x4 <- factor(X$x4, ordered = TRUE) +X$x5 <- factor(X$x5, ordered = TRUE) # Split data into test and train sets test_set_pct <- 0.2 @@ -235,23 +341,78 @@ n_test <- round(test_set_pct*n) n_train <- n - n_test test_inds <- sort(sample(1:n, n_test, replace = FALSE)) train_inds <- (1:n)[!((1:n) %in% test_inds)] -X_test <- as.data.frame(X[test_inds,]) -X_train <- as.data.frame(X[train_inds,]) +X_test <- X[test_inds,] +X_train <- X[train_inds,] +pi_test <- pi_x[test_inds] +pi_train <- pi_x[train_inds] +Z_test <- Z[test_inds] +Z_train <- Z[train_inds] y_test <- y[test_inds] y_train <- y[train_inds] -s_x_test <- s_XW[test_inds] -s_x_train <- s_XW[train_inds] +mu_test <- mu_x[test_inds] +mu_train <- mu_x[train_inds] +tau_test <- tau_x[test_inds] +tau_train <- tau_x[train_inds] +rfx_group_ids_test <- rfx_group_ids[test_inds] +rfx_group_ids_train <- rfx_group_ids[train_inds] +rfx_basis_test <- rfx_basis[test_inds,] +rfx_basis_train <- rfx_basis[train_inds,] 
+rfx_term_test <- rfx_term[test_inds] +rfx_term_train <- rfx_term[train_inds] ``` ## Python ```{python} -# Python implementation coming soon +random_seed = 1234 +rng = np.random.default_rng(random_seed) + +n = 1000 +snr = 2 +x1 = rng.standard_normal(n) +x2 = rng.standard_normal(n) +x3 = rng.standard_normal(n) +x4 = rng.binomial(1, 0.5, n).astype(float) +x5 = rng.choice([1, 2, 3], n).astype(float) +X = np.column_stack([x1, x2, x3, x4, x5]) +mu_x = mu1(X) +tau_x = tau2(X) +pi_x = 0.8 * norm.cdf((3 * mu_x / np.std(mu_x)) - 0.5 * X[:, 0]) + 0.05 + rng.uniform(size=n) / 10 +Z = rng.binomial(1, pi_x) +E_XZ = mu_x + Z * tau_x +rfx_group_ids = np.tile([1, 2], n // 2) # 1-indexed group IDs +rfx_coefs = np.array([[-1.0, -1.0], [1.0, 1.0]]) +rfx_basis = np.column_stack([np.ones(n), rng.uniform(-1, 1, n)]) +rfx_term = np.sum(rfx_coefs[rfx_group_ids - 1] * rfx_basis, axis=1) +y = E_XZ + rfx_term + rng.standard_normal(n) * (np.std(E_XZ) / snr) + +# Ordered categoricals +X_df = pd.DataFrame(X, columns=["x1", "x2", "x3", "x4", "x5"]) +X_df["x4"] = pd.Categorical(X_df["x4"].astype(int), categories=[0, 1], ordered=True) +X_df["x5"] = pd.Categorical(X_df["x5"].astype(int), categories=[1, 2, 3], ordered=True) + +# Train/test split +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test = X_df.iloc[test_inds] +X_train = X_df.iloc[train_inds] +pi_test = pi_x[test_inds] +pi_train = pi_x[train_inds] +Z_test = Z[test_inds] +Z_train = Z[train_inds] +y_test = y[test_inds] +y_train = y[train_inds] +rfx_group_ids_test = rfx_group_ids[test_inds] +rfx_group_ids_train = rfx_group_ids[train_inds] +rfx_basis_test = rfx_basis[test_inds] +rfx_basis_train = rfx_basis[train_inds] ``` :::: -Sample a BART model. +Sample a BCF model. ::::{.panel-tabset group="language"} @@ -261,63 +422,81 @@ Sample a BART model. 
num_gfr <- 10 num_burnin <- 0 num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 100, - alpha = 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = 50, alpha = 0.95, - beta = 1.25, min_samples_leaf = 1) -bart_model <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, +prognostic_forest_params <- list(sample_sigma2_leaf = F) +treatment_effect_forest_params <- list(sample_sigma2_leaf = F) +bcf_model <- bcf( + X_train = X_train, Z_train = Z_train, y_train = y_train, propensity_train = pi_train, + rfx_group_ids_train = rfx_group_ids_train, rfx_basis_train = rfx_basis_train, + X_test = X_test, Z_test = Z_test, propensity_test = pi_test, + rfx_group_ids_test = rfx_group_ids_test, rfx_basis_test = rfx_basis_test, num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params + prognostic_forest_params = prognostic_forest_params, + treatment_effect_forest_params = treatment_effect_forest_params ) ``` ## Python ```{python} -# Python implementation coming soon +num_gfr = 10 +num_burnin = 0 +num_mcmc = 100 +bcf_model = BCFModel() +bcf_model.sample( + X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, + rfx_group_ids_train=rfx_group_ids_train, rfx_basis_train=rfx_basis_train, + X_test=X_test, Z_test=Z_test, propensity_test=pi_test, + rfx_group_ids_test=rfx_group_ids_test, rfx_basis_test=rfx_basis_test, + num_gfr=num_gfr, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1}, + prognostic_forest_params={"sample_sigma2_leaf": False}, + treatment_effect_forest_params={"sample_sigma2_leaf": False}, +) ``` :::: ## Serialization -Save the BART model to disk. +Save the BCF model to disk. 
::::{.panel-tabset group="language"} ## R ```{r} -saveBARTModelToJsonFile(bart_model, "bart.json") +saveBCFModelToJsonFile(bcf_model, "bcf_r.json") ``` ## Python ```{python} -# Python implementation coming soon +bcf_json_string = bcf_model.to_json() +with open("bcf_py.json", "w") as f: + json.dump(json.loads(bcf_json_string), f) ``` :::: ## Deserialization -Reload the BART model from disk. +Reload the BCF model from disk. ::::{.panel-tabset group="language"} ## R ```{r} -bart_model_reload <- createBARTModelFromJsonFile("bart.json") +bcf_model_reload <- createBCFModelFromJsonFile("bcf_r.json") ``` ## Python ```{python} -# Python implementation coming soon +with open("bcf_py.json", "r") as f: + bcf_json_reload = json.dumps(json.load(f)) +bcf_model_reload = BCFModel() +bcf_model_reload.from_json(bcf_json_reload) ``` :::: @@ -329,19 +508,48 @@ Check that the predictions align with those of the original model. ## R ```{r} -bart_preds_reload <- predict(bart_model_reload, X_train) -plot(rowMeans(bart_model$y_hat_train), rowMeans(bart_preds_reload$y_hat), - xlab = "Original", ylab = "Deserialized", main = "Conditional Mean Estimates") +bcf_preds_reload <- predict(bcf_model_reload, X_train, Z_train, pi_train, rfx_group_ids_train, rfx_basis_train) +plot(rowMeans(bcf_model$mu_hat_train), rowMeans(bcf_preds_reload$mu_hat), + xlab = "Original", ylab = "Deserialized", main = "Prognostic forest") abline(0,1,col="red",lwd=3,lty=3) -plot(rowMeans(bart_model$sigma2_x_hat_train), rowMeans(bart_preds_reload$variance_forest_predictions), - xlab = "Original", ylab = "Deserialized", main = "Conditional Variance Estimates") +plot(rowMeans(bcf_model$tau_hat_train), rowMeans(bcf_preds_reload$tau_hat), + xlab = "Original", ylab = "Deserialized", main = "Treatment forest") +abline(0,1,col="red",lwd=3,lty=3) +plot(rowMeans(bcf_model$y_hat_train), rowMeans(bcf_preds_reload$y_hat), + xlab = "Original", ylab = "Deserialized", main = "Overall outcome") abline(0,1,col="red",lwd=3,lty=3) ``` ## 
Python ```{python} -# Python implementation coming soon +bcf_preds_orig = bcf_model.predict( + X=X_train, Z=Z_train, propensity=pi_train, + rfx_group_ids=rfx_group_ids_train, rfx_basis=rfx_basis_train, + terms=["mu", "tau", "y_hat"], +) +bcf_preds_reload = bcf_model_reload.predict( + X=X_train, Z=Z_train, propensity=pi_train, + rfx_group_ids=rfx_group_ids_train, rfx_basis=rfx_basis_train, + terms=["mu", "tau", "y_hat"], +) + +fig, axes = plt.subplots(1, 3, figsize=(15, 5)) +for ax, term, title in zip( + axes, + ["mu_hat", "tau_hat", "y_hat"], + ["Prognostic forest", "Treatment forest", "Overall outcome"], +): + orig = bcf_preds_orig[term].mean(axis=1) + reload = bcf_preds_reload[term].mean(axis=1) + lo, hi = min(orig.min(), reload.min()), max(orig.max(), reload.max()) + ax.scatter(orig, reload, alpha=0.4, s=10) + ax.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) + ax.set_xlabel("Original") + ax.set_ylabel("Deserialized") + ax.set_title(title) +plt.tight_layout() +plt.show() ``` :::: diff --git a/vignettes/sklearn.qmd b/vignettes/sklearn.qmd index 106b500da..9b69116e1 100644 --- a/vignettes/sklearn.qmd +++ b/vignettes/sklearn.qmd @@ -1,64 +1,218 @@ --- title: "Scikit-Learn Interface" +execute: + freeze: auto # re-render only when source changes --- -Using `stochtree` via scikit-learn compatible estimators — -`StochTreeBARTRegressor`, `StochTreeBARTBinaryClassifier`, and -`StochTreeBCFRegressor` — for seamless integration with the sklearn ecosystem -(pipelines, cross-validation, grid search). +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + +This vignette is python-specific and no similar interface is implemented for R. -::: {.callout-note} -This vignette is under construction. 
Content will be ported from: +`stochtree.BARTModel` is fundamentally a Bayesian interface in which users specify a prior, provide data, sample from the posterior, and manage and inspect the resulting posterior samples. However, the basic BART model -- **Python**: `stochtree_repo/demo/notebooks/sklearn_wrappers.ipynb` -::: +$$y_i \sim \mathcal{N}\left(f(X_i), \sigma^2\right)$$ + +involves samples of a nonparametric function $f$ which estimates the expected +value of $y$ given $X$. Averaging over these draws, the posterior mean $\bar{f}$ +alone may satisfy some supervised learning use cases. To serve this use case +straightforwardly, `stochtree` offers +[scikit-learn-compatible estimator](https://scikit-learn.org/stable/developers/develop.html) +wrappers around `BARTModel` which implement the familiar `sklearn` API. + +- **`StochTreeBARTRegressor`**: continuous outcomes — provides `fit`, `predict`, + and `score` +- **`StochTreeBARTBinaryClassifier`**: binary outcomes via probit BART — + provides `fit`, `predict`, `predict_proba`, `decision_function`, and `score` +- Multi-class classification is supported by wrapping + [`OneVsRestClassifier`](https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html) + around `StochTreeBARTBinaryClassifier` ## Setup ```{python} -#| eval: false -from stochtree.sklearn import ( +import matplotlib.pyplot as plt +import numpy as np +from sklearn.datasets import load_wine, load_breast_cancer +from sklearn.model_selection import GridSearchCV +from sklearn.multiclass import OneVsRestClassifier +from stochtree import ( StochTreeBARTRegressor, StochTreeBARTBinaryClassifier, - StochTreeBCFRegressor, ) -import numpy as np -from sklearn.model_selection import cross_val_score -from sklearn.pipeline import Pipeline -from sklearn.preprocessing import StandardScaler ``` -## Regression with `StochTreeBARTRegressor` +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +## BART Regression 
+ +We simulate simple regression data to demonstrate the continuous outcome case. + +```{python} +n = 100 +p = 10 +X = rng.normal(size=(n, p)) +y = X[:, 0] * 3 + rng.normal(size=n) +``` + +We fit a BART regression model by initializing a `StochTreeBARTRegressor` and +calling `fit()`. Since `BARTModel` is configured primarily through parameter +dictionaries, downstream parameters are passed through as such — here we only +specify the random seed. + +```{python} +reg = StochTreeBARTRegressor(general_params={"random_seed": random_seed, "num_threads": 1}) +reg.fit(X, y) +``` + +We can then predict from the model and compare posterior mean predictions to +the true outcome. + +```{python} +pred = reg.predict(X) +plt.scatter(pred, y) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.show() +``` + +We can also verify determinism by running the model again with the same seed +and comparing predictions. + +```{python} +reg2 = StochTreeBARTRegressor(general_params={"random_seed": random_seed, "num_threads": 1}) +reg2.fit(X, y) +pred2 = reg2.predict(X) +plt.scatter(pred, pred2) +plt.xlabel("First model") +plt.ylabel("Second model") +plt.show() +``` + +## Cross-Validating a BART Model + +While the default hyperparameters of `BARTModel` are designed to work well +out of the box, we can use posterior mean prediction error to cross-validate +the model's parameters. Below we use grid search to consider the effect of +several BART parameters: + +1. Number of GFR iterations (`num_gfr`) +2. Number of MCMC iterations (`num_mcmc`) +3. 
`num_trees`, `alpha`, and `beta` for the mean forest + +```{python} +param_grid = { + "num_gfr": [10, 40], + "num_mcmc": [0, 1000], + "mean_forest_params": [ + {"num_trees": 50, "alpha": 0.95, "beta": 2.0}, + {"num_trees": 100, "alpha": 0.90, "beta": 1.5}, + {"num_trees": 200, "alpha": 0.85, "beta": 1.0}, + ], +} +grid_search = GridSearchCV( + estimator=StochTreeBARTRegressor(general_params={"num_threads": 1}), + param_grid=param_grid, + cv=5, + scoring="r2", + n_jobs=-1, +) +grid_search.fit(X, y) +``` + +```{python} +cv_best_ind = np.argwhere(grid_search.cv_results_['rank_test_score'] == 1).item(0) +best_num_gfr = grid_search.cv_results_['param_num_gfr'][cv_best_ind].item(0) +best_num_mcmc = grid_search.cv_results_['param_num_mcmc'][cv_best_ind].item(0) +best_mean_forest_params = grid_search.cv_results_['param_mean_forest_params'][cv_best_ind] +best_num_trees = best_mean_forest_params['num_trees'] +best_alpha = best_mean_forest_params['alpha'] +best_beta = best_mean_forest_params['beta'] +print_message = f""" +Hyperparameters chosen by grid search: + num_gfr: {best_num_gfr} + num_mcmc: {best_num_mcmc} + num_trees: {best_num_trees} + alpha: {best_alpha} + beta: {best_beta} +""" +print(print_message) +``` + +## BART Classification + +### Binary Classification + +We load a binary outcome dataset from `sklearn`. ```{python} -#| eval: false -# Fit and predict +dataset = load_breast_cancer() +X = dataset.data +y = dataset.target ``` -## Classification with `StochTreeBARTBinaryClassifier` +We fit a binary classification model using `StochTreeBARTBinaryClassifier`. ```{python} -#| eval: false -# Fit and predict_proba +clf = StochTreeBARTBinaryClassifier(general_params={"random_seed": random_seed, "num_threads": 1}) +clf.fit(X=X, y=y) ``` -## Causal Inference with `StochTreeBCFRegressor` +In addition to class predictions, we can compute and visualize the predicted +probability of each class via `predict_proba()`. 
```{python} -#| eval: false -# BCF via sklearn interface +probs = clf.predict_proba(X) +plt.hist(probs[:, 1], bins=30) +plt.xlabel("Predicted probability (class 1)") +plt.ylabel("Count") +plt.show() ``` -## Using sklearn Pipelines +### Multi-Class Classification + +For multi-class outcomes, we wrap `OneVsRestClassifier` around +`StochTreeBARTBinaryClassifier`. Here we use the Wine dataset, which has three +classes. ```{python} -#| eval: false -# Example pipeline with preprocessing +dataset = load_wine() +X = dataset.data +y = dataset.target +``` + +```{python} +clf = OneVsRestClassifier( + StochTreeBARTBinaryClassifier(general_params={"random_seed": random_seed, "num_threads": 1}) +) +clf.fit(X=X, y=y) ``` -## Cross-Validation +We visualize the histogram of predicted probabilities for each outcome category. ```{python} -#| eval: false -# cross_val_score with a stochtree estimator +fig, (ax1, ax2, ax3) = plt.subplots(3, 1) +fig.tight_layout(pad=3.0) +probs = clf.predict_proba(X) +ax1.hist(probs[y == 0, 0], bins=30) +ax1.set_title("Predicted Probabilities for Class 0") +ax1.set_xlim(0, 1) +ax2.hist(probs[y == 1, 1], bins=30) +ax2.set_title("Predicted Probabilities for Class 1") +ax2.set_xlim(0, 1) +ax3.hist(probs[y == 2, 2], bins=30) +ax3.set_title("Predicted Probabilities for Class 2") +ax3.set_xlim(0, 1) +plt.show() ``` diff --git a/vignettes/summary-plotting.qmd b/vignettes/summary-plotting.qmd index 6a0de91a6..0a8459e89 100644 --- a/vignettes/summary-plotting.qmd +++ b/vignettes/summary-plotting.qmd @@ -1,19 +1,53 @@ --- title: "Summary and Plotting" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + This vignette demonstrates the summary and plotting utilities available for `stochtree` models. 
# Setup +Load necessary packages + ::::{.panel-tabset group="language"} ## R ```{r} library(stochtree) +``` + +## Python + +```{python} +import numpy as np +import matplotlib.pyplot as plt +from stochtree import BARTModel, BCFModel, plot_parameter_trace +``` + +:::: + +Set a seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} random_seed = 1234 set.seed(random_seed) ``` @@ -21,7 +55,8 @@ set.seed(random_seed) ## Python ```{python} -# Python implementation coming soon +random_seed = 1234 +rng = np.random.default_rng(random_seed) ``` :::: @@ -54,7 +89,20 @@ y <- f_XW + rnorm(n, 0, 1) * noise_sd ## Python ```{python} -# Python implementation coming soon +n = 1000 +p_x = 10 +p_w = 1 +X = rng.uniform(size=(n, p_x)) +W = rng.uniform(size=(n, p_w)) +# R uses X[,10] (1-indexed) = Python X[:,9] +f_XW = ( + ((X[:, 9] >= 0) & (X[:, 9] < 0.25)) * (-7.5 * W[:, 0]) + + ((X[:, 9] >= 0.25) & (X[:, 9] < 0.5)) * (-2.5 * W[:, 0]) + + ((X[:, 9] >= 0.5) & (X[:, 9] < 0.75)) * ( 2.5 * W[:, 0]) + + ((X[:, 9] >= 0.75) & (X[:, 9] < 1.0)) * ( 7.5 * W[:, 0]) +) +noise_sd = 1.0 +y = f_XW + rng.standard_normal(n) * noise_sd ``` :::: @@ -84,7 +132,16 @@ bart_model <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +bart_model = BARTModel() +bart_model.sample( + X_train=X, + y_train=y, + leaf_basis_train=W, + num_gfr=10, + num_burnin=0, + num_mcmc=1000, + general_params={"num_threads": 1, "num_chains": 3}, +) ``` :::: @@ -102,7 +159,7 @@ print(bart_model) ## Python ```{python} -# Python implementation coming soon +print(bart_model) ``` :::: @@ -121,7 +178,7 @@ summary(bart_model) ## Python ```{python} -# Python implementation coming soon +print(bart_model.summary()) ``` :::: @@ -141,7 +198,8 @@ plot(bart_model) ## Python ```{python} -# Python implementation coming soon +ax = plot_parameter_trace(bart_model, term="global_error_scale") +plt.show() ``` :::: @@ -171,18 +229,23 @@ plot( ## Python ```{python} -# Python implementation coming 
soon +y_hat_train_samples = bart_model.extract_parameter("y_hat_train") +obs_index = 0 # 0-indexed (R uses 1) +fig, ax = plt.subplots() +ax.plot(y_hat_train_samples[obs_index, :]) +ax.set_title(f"In-Sample Predictions Traceplot, Observation {obs_index}") +ax.set_xlabel("Index") +ax.set_ylabel("Parameter Values") +plt.show() ``` :::: # Causal Inference -We now run the same demo for the causal inference use case served by the `bcf()` -function. +We now run the same demo for the causal inference use case served by the `bcf()` function in R and the `BCFModel` Python class. -Below we simulate a simple dataset for a causal inference problem with binary -treatment and continuous outcome. +Below we simulate a simple dataset for a causal inference problem with binary treatment and continuous outcome. ::::{.panel-tabset group="language"} @@ -208,12 +271,21 @@ y = mu_X + tau_X * Z + epsilon ## Python ```{python} -# Python implementation coming soon +n = 1000 +p_X = 5 +X = rng.uniform(size=(n, p_X)) +# R uses X[,1], X[,3], X[,2] (1-indexed) = Python X[:,0], X[:,2], X[:,1] +pi_X = 0.25 + 0.5 * X[:, 0] +Z = rng.binomial(1, pi_X, n).astype(float) +mu_X = pi_X * 5 + 2 * X[:, 2] +tau_X = X[:, 1] * 2 - 1 +epsilon = rng.standard_normal(n) +y = mu_X + tau_X * Z + epsilon ``` :::: -Now we fit a simple BCF model to the data. +Now we fit a simple BCF model to the data ::::{.panel-tabset group="language"} @@ -238,7 +310,17 @@ bcf_model <- stochtree::bcf( ## Python ```{python} -# Python implementation coming soon +bcf_model = BCFModel() +bcf_model.sample( + X_train=X, + Z_train=Z, + y_train=y, + propensity_train=pi_X, + num_gfr=10, + num_burnin=0, + num_mcmc=1000, + general_params={"num_threads": 1, "num_chains": 3}, +) ``` :::: @@ -256,13 +338,12 @@ print(bcf_model) ## Python ```{python} -# Python implementation coming soon +print(bcf_model) ``` :::: -For a more detailed summary (including the information above), we use the `summary()` -function. 
+For a more detailed summary (including the information above), we use the `summary()` function / method. ::::{.panel-tabset group="language"} ## R @@ -275,14 +356,14 @@ summary(bcf_model) ``` ## Python ```{python} -# Python implementation coming soon +print(bcf_model.summary()) ``` :::: -We can use the `plot()` function to produce a traceplot of model terms like the global -error scale $\sigma^2$ or (if $\sigma^2$ is not sampled) the first observation of -cached train set predictions. +In R, we have a `plot()` method that produces a traceplot of model terms like the global error scale $\sigma^2$ or (if $\sigma^2$ is not sampled) the first observation of cached train set predictions. + +In Python, we provide a `plot_parameter_trace()` function for requesting a traceplot of a specific model parameter. ::::{.panel-tabset group="language"} @@ -295,18 +376,13 @@ plot(bcf_model) ``` ## Python ```{python} -# Python implementation coming soon +ax = plot_parameter_trace(bcf_model, term="global_error_scale") +plt.show() ``` :::: -For finer-grained control over which parameters to plot, we can also use the -`extractParameter()` function to pull the posterior distribution of any valid model -term (e.g., global error scale $\sigma^2$, prognostic forest leaf scale -$\sigma^2_{\mu}$, CATE forest leaf scale $\sigma^2_{\tau}$, adaptive coding -parameters $b_0$ and $b_1$ for binary treatment, in-sample mean function predictions -`y_hat_train`, in-sample CATE function predictions `tau_hat_train`) and then plot any -subset or transformation of these values.
+For finer-grained control over which parameters to plot, we can also use the `extractParameter()` function in R or the `extract_parameter()` method in Python to query the posterior distribution of any valid model term (e.g., global error scale $\sigma^2$, prognostic forest leaf scale $\sigma^2_{\mu}$, CATE forest leaf scale $\sigma^2_{\tau}$, adaptive coding parameters $b_0$ and $b_1$ for binary treatment, in-sample mean function predictions `y_hat_train`, in-sample CATE function predictions `tau_hat_train`) and then plot any subset or transformation of these values. ::::{.panel-tabset group="language"} @@ -335,7 +411,15 @@ legend( ## Python ```{python} -# Python implementation coming soon +adaptive_coding_samples = bcf_model.extract_parameter("adaptive_coding") +fig, ax = plt.subplots() +ax.plot(adaptive_coding_samples[0, :], color="blue", label="Control") +ax.plot(adaptive_coding_samples[1, :], color="orange", label="Treated") +ax.set_title("Adaptive Coding Parameter Traceplot") +ax.set_xlabel("Index") +ax.set_ylabel("Parameter Values") +ax.legend(loc="upper right") +plt.show() ``` :::: diff --git a/vignettes/tree-inspection.qmd b/vignettes/tree-inspection.qmd index 369cd778c..2ade74b65 100644 --- a/vignettes/tree-inspection.qmd +++ b/vignettes/tree-inspection.qmd @@ -1,8 +1,21 @@ --- title: "Tree Inspection" bibliography: vignettes.bib +execute: + freeze: auto # re-render only when source changes --- +```{r} +#| include: false +reticulate::use_python( + Sys.getenv( + "RETICULATE_PYTHON", + unset = file.path(here::here(), ".venv", "bin", "python") + ), + required = TRUE +) +``` + While out of sample evaluation and MCMC diagnostics on parametric BART components (i.e. $\sigma^2$, the global error variance) are helpful, it's important to be able to inspect the trees in a BART / BCF model. 
This vignette walks through some of the @@ -10,6 +23,8 @@ features `stochtree` provides to query and understand the forests and trees in a # Setup +Load necessary packages + ::::{.panel-tabset group="language"} ## R @@ -21,21 +36,42 @@ library(stochtree) ## Python ```{python} -# Python implementation coming soon +import numpy as np +import matplotlib.pyplot as plt +from stochtree import BARTModel +``` + +:::: + +Set a seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed = 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) ``` :::: -# Demo 1: Supervised Learning +# Data Generation -Generate sample data where feature 10 is the only "important" feature. +Generate sample data where feature 10 is the only "important" feature ::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data n <- 500 p_x <- 10 X <- matrix(runif(n*p_x), ncol = p_x) @@ -47,8 +83,34 @@ f_XW <- ( ) noise_sd <- 1 y <- f_XW + rnorm(n, 0, 1)*noise_sd +``` -# Split data into test and train sets +## Python + +```{python} +n = 500 +p_x = 10 +X = rng.uniform(size=(n, p_x)) +# Feature 10 (R) = feature index 9 (Python, 0-indexed) +f_XW = ( + ((X[:, 9] >= 0) & (X[:, 9] < 0.25)) * (-7.5) + + ((X[:, 9] >= 0.25) & (X[:, 9] < 0.5)) * (-2.5) + + ((X[:, 9] >= 0.5) & (X[:, 9] < 0.75)) * (2.5) + + ((X[:, 9] >= 0.75) & (X[:, 9] < 1.0)) * (7.5) +) +noise_sd = 1.0 +y = f_XW + rng.standard_normal(n) * noise_sd +``` + +:::: + +Split into train and test sets + +::::{.panel-tabset group="language"} + +## R + +```{r} test_set_pct <- 0.2 n_test <- round(test_set_pct*n) n_train <- n - n_test @@ -63,14 +125,20 @@ y_train <- y[train_inds] ## Python ```{python} -# Python implementation coming soon +n_test = round(0.2 * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test = X[test_inds] +X_train = X[train_inds] +y_test = y[test_inds] +y_train = 
y[train_inds] ``` :::: -## Sampling and Analysis +# Model Sampling -Run BART. +Sample a BART model with 10 GFR and 100 MCMC iterations ::::{.panel-tabset group="language"} @@ -91,36 +159,65 @@ bart_model <- stochtree::bart( ## Python ```{python} -# Python implementation coming soon +num_gfr = 10 +num_burnin = 0 +num_mcmc = 100 +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, y_train=y_train, X_test=X_test, + num_gfr=num_gfr, num_burnin=num_burnin, num_mcmc=num_mcmc, + general_params={"num_threads": 1, "keep_gfr": True}, +) ``` :::: -Inspect the MCMC samples. +# Model Inspection + +Assess the global error variance traceplot and test set prediction quality ::::{.panel-tabset group="language"} ## R ```{r} -plot(bart_model$sigma2_global_samples, ylab="sigma^2") +sigma2_samples <- extractParameter(bart_model, "sigma2_global") +plot(sigma2_samples, ylab="sigma^2") abline(h=noise_sd^2,col="red",lty=2,lwd=2.5) -plot(rowMeans(bart_model$y_hat_test), y_test, - pch=16, cex=0.75, xlab = "pred", ylab = "actual") +y_hat_test <- predict(bart_model, X=X_test, type="mean", terms="y_hat") +plot(y_hat_test, y_test, pch=16, cex=0.75, xlab = "pred", ylab = "actual") abline(0,1,col="red",lty=2,lwd=2.5) ``` ## Python ```{python} -# Python implementation coming soon +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) + +sigma2_samples = bart_model.extract_parameter("sigma2_global") +ax1.plot(sigma2_samples) +ax1.axhline(noise_sd**2, color="red", linestyle="dashed", linewidth=2) +ax1.set_ylabel(r"$\sigma^2$") + +y_hat_test = bart_model.predict(X=X_test, terms="y_hat", type="mean") +ax2.scatter(y_hat_test, y_test, s=15, alpha=0.6) +lo = min(y_hat_test.min(), y_test.min()) +hi = max(y_hat_test.max(), y_test.max()) +ax2.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +ax2.set_xlabel("pred") +ax2.set_ylabel("actual") + +plt.tight_layout() +plt.show() ``` :::: ## Variable Split Counts -Check the variable split count in the last GFR sample. 
+The `get_forest_split_counts` method of a BART model's internal forest objects allows us to compute the number of times each variable was used in a split rule across all trees in a given forest. + +Below we query this vector for the final GFR sample (1-indexed as 10 in R, 0-indexed as 9 in Python), where the second argument is the dimensionality of the covariates. ::::{.panel-tabset group="language"} @@ -133,11 +230,13 @@ bart_model$mean_forests$get_forest_split_counts(10, p_x) ## Python ```{python} -# Python implementation coming soon +bart_model.forest_container_mean.get_forest_split_counts(9, p_x) ``` :::: +We can also compute split counts for each feature aggregated over all forests + ::::{.panel-tabset group="language"} ## R @@ -149,14 +248,19 @@ bart_model$mean_forests$get_aggregate_split_counts(p_x) ## Python ```{python} -# Python implementation coming soon +bart_model.forest_container_mean.get_overall_split_counts(p_x) ``` :::: The split counts appear relatively uniform across features, so let's dig deeper and -look at individual trees, starting with the first tree in the last "grow-from-root" -sample. +look at individual trees. + +The `get_granular_split_counts` method returns a 3-dimensional array of shape `(num_forests, num_trees, num_features)`, where each entry represents the number of times a feature was used in a split for a specific tree in a specific forest. + +That is, we can count the number of times feature $k$ was split on in tree $j$ of forest $i$ by looking at the `(i,j,k)` entry of this array. + +Below we compute the split count for all features in the first tree of the last GFR sample in our model (noting again the use of 1-indexing in R and 0-indexing in Python). 
::::{.panel-tabset group="language"} @@ -170,12 +274,13 @@ splits[10,1,] ## Python ```{python} -# Python implementation coming soon +splits = bart_model.forest_container_mean.get_granular_split_counts(p_x) +splits[9, 0, :] ``` :::: -This tree has a single split on the only "important" feature. Now, let's look at +This tree has a single split on the only "important" feature (10). Now, let's look at the second tree. ::::{.panel-tabset group="language"} @@ -189,11 +294,13 @@ splits[10,2,] ## Python ```{python} -# Python implementation coming soon +splits[9, 1, :] ``` :::: +And the 20th and 30th trees + ::::{.panel-tabset group="language"} ## R @@ -205,7 +312,7 @@ splits[10,20,] ## Python ```{python} -# Python implementation coming soon +splits[9, 19, :] ``` :::: @@ -221,7 +328,7 @@ splits[10,30,] ## Python ```{python} -# Python implementation coming soon +splits[9, 29, :] ``` :::: @@ -267,7 +374,24 @@ for (nid in nodes) { ## Python ```{python} -# Python implementation coming soon +forest_num = 9 +tree_num = 0 +fc = bart_model.forest_container_mean +nodes = np.sort(fc.nodes(forest_num, tree_num)) +for nid in nodes: + depth = fc.node_depth(forest_num, tree_num, nid) + indent = "\t" * depth + if fc.is_leaf_node(forest_num, tree_num, nid): + value = np.round(fc.node_leaf_values(forest_num, tree_num, nid), 3) + print(f"{indent}node={nid} is a leaf node with value={value}") + else: + + left = fc.left_child_node(forest_num, tree_num, nid) + feature = fc.node_split_index(forest_num, tree_num, nid) + threshold = round(fc.node_split_threshold(forest_num, tree_num, nid), 3) + right = fc.right_child_node(forest_num, tree_num, nid) + print(f"{indent}node={nid} is a split node, which tells us to go to node " + f"{left} if X[:, {feature}] <= {threshold} else to node {right}") ``` :::: diff --git a/vignettes/vignettes.bib b/vignettes/vignettes.bib index 65a6f152d..afe346880 100644 --- a/vignettes/vignettes.bib +++ b/vignettes/vignettes.bib @@ -1,3 +1,105 @@ 
+@article{friedman1991multivariate, + title={Multivariate adaptive regression splines}, + author={Friedman, Jerome H}, + journal={The annals of statistics}, + volume={19}, + number={1}, + pages={1--67}, + year={1991}, + publisher={Institute of Mathematical Statistics} +} + +@article{mcdonald1992effects, + title={Effects of computer reminders for influenza vaccination on morbidity during influenza epidemics.}, + author={McDonald, Clement J and Hui, Siu L and Tierney, William M}, + journal={MD computing: computers in medical practice}, + volume={9}, + number={5}, + pages={304--312}, + year={1992} +} + +@article{hirano2000assessing, + author = {Hirano, Keisuke and Imbens, Guido W. and Rubin, Donald B. and Zhou, Xiao-Hua}, + title = {Assessing the effect of an influenza vaccine in an + encouragement design }, + journal = {Biostatistics}, + volume = {1}, + number = {1}, + pages = {69-88}, + year = {2000}, + month = {03}, + issn = {1465-4644}, + doi = {10.1093/biostatistics/1.1.69}, + url = {https://doi.org/10.1093/biostatistics/1.1.69}, + eprint = {https://academic.oup.com/biostatistics/article-pdf/1/1/69/17744019/100069.pdf}, +} + +@incollection{richardson2011transparent, + author = {Richardson, Thomas S. and Evans, Robin J. and Robins, James M.}, + isbn = {9780199694587}, + title = {Transparent Parametrizations of Models for Potential Outcomes}, + booktitle = {Bayesian Statistics 9}, + publisher = {Oxford University Press}, + year = {2011}, + month = {10}, + doi = {10.1093/acprof:oso/9780199694587.003.0019}, + url = {https://doi.org/10.1093/acprof:oso/9780199694587.003.0019}, + eprint = {https://academic.oup.com/book/0/chapter/141661815/chapter-ag-pdf/45787772/book\_1879\_section\_141661815.ag.pdf}, +} + +@book{imbens2015causal, + place={Cambridge}, + title={Causal Inference for Statistics, Social, and Biomedical Sciences: An Introduction}, + publisher={Cambridge University Press}, + author={Imbens, Guido W. 
and Rubin, Donald B.}, + year={2015} +} + +@article{hahn2016bayesian, + title={A Bayesian partial identification approach to inferring the prevalence of accounting misconduct}, + author={Hahn, P Richard and Murray, Jared S and Manolopoulou, Ioanna}, + journal={Journal of the American Statistical Association}, + volume={111}, + number={513}, + pages={14--26}, + year={2016}, + publisher={Taylor \& Francis} +} + +@article{albert1993bayesian, + title={Bayesian analysis of binary and polychotomous response data}, + author={Albert, James H and Chib, Siddhartha}, + journal={Journal of the American statistical Association}, + volume={88}, + number={422}, + pages={669--679}, + year={1993}, + publisher={Taylor \& Francis} +} + +@article{papakostas2023forecasts, + title={Do forecasts of bankruptcy cause bankruptcy? A machine learning sensitivity analysis}, + author={Papakostas, Demetrios and Hahn, P Richard and Murray, Jared and Zhou, Frank and Gerakos, Joseph}, + journal={The Annals of Applied Statistics}, + volume={17}, + number={1}, + pages={711--739}, + year={2023}, + publisher={Institute of Mathematical Statistics} +} + +@article{lindo2010ability, + title={Ability, gender, and performance standards: Evidence from academic probation}, + author={Lindo, Jason M and Sanders, Nicholas J and Oreopoulos, Philip}, + journal={American economic journal: Applied economics}, + volume={2}, + number={2}, + pages={95--117}, + year={2010}, + publisher={American Economic Association} +} + @article{murray2021log, title={Log-linear Bayesian additive regression trees for multinomial logistic and count regression models}, author={Murray, Jared S}, From a5ccc6288e8a33fe274c1548bbbc9c96faf4cc11 Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Tue, 24 Mar 2026 01:16:28 -0500 Subject: [PATCH 4/8] Updated vignettes --- requirements.txt | 2 +- vignettes/bart.qmd | 2 +- vignettes/bcf.qmd | 407 +++++--------------------------- vignettes/custom-sampling.qmd | 2 +- vignettes/ensemble-kernel.qmd | 2 +- 
vignettes/heteroskedastic.qmd | 2 +- vignettes/index.qmd | 30 +-- vignettes/iv.qmd | 8 +- vignettes/multi-chain.qmd | 2 +- vignettes/multivariate-bcf.qmd | 2 +- vignettes/ordinal-outcome.qmd | 2 +- vignettes/prior-calibration.qmd | 2 +- vignettes/rdd.qmd | 10 +- vignettes/serialization.qmd | 2 +- vignettes/sklearn.qmd | 6 +- vignettes/summary-plotting.qmd | 2 +- vignettes/tree-inspection.qmd | 2 +- 17 files changed, 98 insertions(+), 387 deletions(-) diff --git a/requirements.txt b/requirements.txt index 382bb4851..a7255f35c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,5 +8,5 @@ matplotlib seaborn mkdocs-material mkdocstrings-python -mkdocs-jupyter +mkdocs-jupyter<0.25 arviz[all] diff --git a/vignettes/bart.qmd b/vignettes/bart.qmd index 64573c587..2a969f893 100644 --- a/vignettes/bart.qmd +++ b/vignettes/bart.qmd @@ -1,5 +1,5 @@ --- -title: "Supervised Learning with BART" +title: "Bayesian Additive Regression Trees for Supervised Learning" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/bcf.qmd b/vignettes/bcf.qmd index e24a0e98c..c392ab40e 100644 --- a/vignettes/bcf.qmd +++ b/vignettes/bcf.qmd @@ -1,5 +1,5 @@ --- -title: "Causal Inference with BCF" +title: "Bayesian Causal Forests for Treatment Effect Estimation" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes @@ -213,24 +213,15 @@ tau_test, tau_train = tau_x[test_inds], tau_x[train_inds] ### Sampling and Analysis -#### Warmstart - -We first simulate from an ensemble model of $y \mid X$ using "warm-start" -initialization samples (@krantsevich2023stochastic). This is the default in -`stochtree`. +We simulate from a BCF model using "warm-start" samples fit with the grow-from-root algorithm (@he2023stochastic, @krantsevich2023stochastic). This is the default in `stochtree`. 
::::{.panel-tabset group="language"} ## R ```{r} -num_gfr <- 10 -num_burnin <- 0 -num_mcmc <- 100 -general_params <- list(keep_every = 5) -prognostic_forest_params <- list(sample_sigma2_leaf = F) -treatment_effect_forest_params <- list(sample_sigma2_leaf = F) -bcf_model_warmstart <- bcf( +general_params <- list(num_threads=1) +bcf_model <- bcf( X_train = X_train, Z_train = Z_train, y_train = y_train, @@ -238,26 +229,23 @@ bcf_model_warmstart <- bcf( X_test = X_test, Z_test = Z_test, propensity_test = pi_test, - num_gfr = num_gfr, - num_burnin = num_burnin, - num_mcmc = num_mcmc, - general_params = general_params, - prognostic_forest_params = prognostic_forest_params, - treatment_effect_forest_params = treatment_effect_forest_params + general_params = general_params ) ``` ## Python ```{python} -bcf_model_warmstart = BCFModel() -bcf_model_warmstart.sample( - X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, - X_test=X_test, Z_test=Z_test, propensity_test=pi_test, - num_gfr=10, num_burnin=0, num_mcmc=100, - general_params={"num_threads": 1, "keep_every": 5}, - prognostic_forest_params={"sample_sigma2_leaf": False}, - treatment_effect_forest_params={"sample_sigma2_leaf": False}, +bcf_model = BCFModel() +bcf_model.sample( + X_train=X_train, + Z_train=Z_train, + y_train=y_train, + propensity_train=pi_train, + X_test=X_test, + Z_test=Z_test, + propensity_test=pi_test, + general_params={"num_threads": 1}, ) ``` @@ -270,8 +258,10 @@ Inspect the samples initialized with an XBART warm-start. 
## R ```{r} +mu_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "prognostic_function") +tau_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "cate") plot( - rowMeans(bcf_model_warmstart$mu_hat_test), + rowMeans(mu_hat_test), mu_test, xlab = "predicted", ylab = "actual", @@ -279,7 +269,7 @@ plot( ) abline(0, 1, col = "red", lty = 3, lwd = 3) plot( - rowMeans(bcf_model_warmstart$tau_hat_test), + rowMeans(tau_hat_test), tau_test, xlab = "predicted", ylab = "actual", @@ -287,12 +277,13 @@ plot( ) abline(0, 1, col = "red", lty = 3, lwd = 3) sigma_observed <- var(y - E_XZ) +sigma2_global_samples <- extractParameter(bcf_model, "sigma2_global") plot_bounds <- c( - min(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)), - max(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)) + min(c(sigma2_global_samples, sigma_observed)), + max(c(sigma2_global_samples, sigma_observed)) ) plot( - bcf_model_warmstart$sigma2_global_samples, + sigma2_global_samples, ylim = plot_bounds, ylab = "sigma^2", xlab = "Sample", @@ -304,159 +295,25 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python ```{python} +mu_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function") sigma_observed = np.var(y - E_XZ) - -mu_pred = bcf_model_warmstart.mu_hat_test.mean(axis=1) -lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) -plt.scatter(mu_pred, mu_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") -plt.show() - -tau_pred = bcf_model_warmstart.tau_hat_test.mean(axis=1) -lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) -plt.scatter(tau_pred, tau_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); 
plt.ylabel("Actual"); plt.title("Treatment effect") -plt.show() - -plt.plot(bcf_model_warmstart.global_var_samples) -plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) -plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") -plt.show() -``` - -:::: - -Examine test set interval coverage. - -::::{.panel-tabset group="language"} - -## R - -```{r} -test_lb <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.025) -test_ub <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.975) -cover <- ((test_lb <= tau_x[test_inds]) & - (test_ub >= tau_x[test_inds])) -mean(cover) -``` - -## Python - -```{python} -test_lb = np.quantile(bcf_model_warmstart.tau_hat_test, 0.025, axis=1) -test_ub = np.quantile(bcf_model_warmstart.tau_hat_test, 0.975, axis=1) -cover = (test_lb <= tau_test) & (test_ub >= tau_test) -print(f"Coverage: {cover.mean():.3f}") -``` - -:::: - -#### BART MCMC without Warmstart - -Next, we simulate from this ensemble model without any warm-start initialization. 
- -::::{.panel-tabset group="language"} - -## R - -```{r} -num_gfr <- 0 -num_burnin <- 2000 -num_mcmc <- 100 -general_params <- list(keep_every = 5) -prognostic_forest_params <- list(sample_sigma2_leaf = F) -treatment_effect_forest_params <- list(sample_sigma2_leaf = F) -bcf_model_root <- bcf( - X_train = X_train, - Z_train = Z_train, - y_train = y_train, - propensity_train = pi_train, - X_test = X_test, - Z_test = Z_test, - propensity_test = pi_test, - num_gfr = num_gfr, - num_burnin = num_burnin, - num_mcmc = num_mcmc, - general_params = general_params, - prognostic_forest_params = prognostic_forest_params, - treatment_effect_forest_params = treatment_effect_forest_params -) -``` - -## Python - -```{python} -bcf_model_root = BCFModel() -bcf_model_root.sample( - X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, - X_test=X_test, Z_test=Z_test, propensity_test=pi_test, - num_gfr=0, num_burnin=2000, num_mcmc=100, - general_params={"num_threads": 1, "keep_every": 5}, - prognostic_forest_params={"sample_sigma2_leaf": False}, - treatment_effect_forest_params={"sample_sigma2_leaf": False}, -) -``` - -:::: - -Inspect the samples after burnin. 
- -::::{.panel-tabset group="language"} - -## R - -```{r} -plot( - rowMeans(bcf_model_root$mu_hat_test), - mu_test, - xlab = "predicted", - ylab = "actual", - main = "Prognostic function" -) -abline(0, 1, col = "red", lty = 3, lwd = 3) -plot( - rowMeans(bcf_model_root$tau_hat_test), - tau_test, - xlab = "predicted", - ylab = "actual", - main = "Treatment effect" -) -abline(0, 1, col = "red", lty = 3, lwd = 3) -sigma_observed <- var(y - E_XZ) -plot_bounds <- c( - min(c(bcf_model_root$sigma2_global_samples, sigma_observed)), - max(c(bcf_model_root$sigma2_global_samples, sigma_observed)) -) -plot( - bcf_model_root$sigma2_global_samples, - ylim = plot_bounds, - ylab = "sigma^2", - xlab = "Sample", - main = "Global variance parameter" -) -abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") -``` - -## Python - -```{python} -mu_pred = bcf_model_root.mu_hat_test.mean(axis=1) +mu_pred = mu_hat_test.mean(axis=1) lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) plt.scatter(mu_pred, mu_test, alpha=0.5) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") plt.show() -tau_pred = bcf_model_root.tau_hat_test.mean(axis=1) +tau_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="cate") +tau_pred = tau_hat_test.mean(axis=1) lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) plt.scatter(tau_pred, tau_test, alpha=0.5) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") plt.show() -plt.plot(bcf_model_root.global_var_samples) +global_var_samples = bcf_model.extract_parameter("sigma2_global") +plt.plot(global_var_samples) plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") plt.show() @@ 
-471,8 +328,8 @@ Examine test set interval coverage. ## R ```{r} -test_lb <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.025) -test_ub <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.975) +test_lb <- apply(tau_hat_test, 1, quantile, 0.025) +test_ub <- apply(tau_hat_test, 1, quantile, 0.975) cover <- ((test_lb <= tau_x[test_inds]) & (test_ub >= tau_x[test_inds])) mean(cover) @@ -481,8 +338,8 @@ mean(cover) ## Python ```{python} -test_lb = np.quantile(bcf_model_root.tau_hat_test, 0.025, axis=1) -test_ub = np.quantile(bcf_model_root.tau_hat_test, 0.975, axis=1) +test_lb = np.quantile(tau_hat_test, 0.025, axis=1) +test_ub = np.quantile(tau_hat_test, 0.975, axis=1) cover = (test_lb <= tau_test) & (test_ub >= tau_test) print(f"Coverage: {cover.mean():.3f}") ``` @@ -599,20 +456,15 @@ tau_test, tau_train = tau_x[test_inds], tau_x[train_inds] ### Sampling and Analysis -#### Warmstart +We simulate from a BCF model using default settings. ::::{.panel-tabset group="language"} ## R ```{r} -num_gfr <- 10 -num_burnin <- 0 -num_mcmc <- 100 -general_params <- list(keep_every = 5) -prognostic_forest_params <- list(sample_sigma2_leaf = F) -treatment_effect_forest_params <- list(sample_sigma2_leaf = F) -bcf_model_warmstart <- bcf( +general_params <- list(num_threads = 1) +bcf_model <- bcf( X_train = X_train, Z_train = Z_train, y_train = y_train, @@ -620,26 +472,23 @@ bcf_model_warmstart <- bcf( X_test = X_test, Z_test = Z_test, propensity_test = pi_test, - num_gfr = num_gfr, - num_burnin = num_burnin, - num_mcmc = num_mcmc, - general_params = general_params, - prognostic_forest_params = prognostic_forest_params, - treatment_effect_forest_params = treatment_effect_forest_params + general_params = general_params ) ``` ## Python ```{python} -bcf_model_warmstart = BCFModel() -bcf_model_warmstart.sample( - X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, - X_test=X_test, Z_test=Z_test, propensity_test=pi_test, - num_gfr=10, num_burnin=0, 
num_mcmc=100, - general_params={"num_threads": 1, "keep_every": 5}, - prognostic_forest_params={"sample_sigma2_leaf": False}, - treatment_effect_forest_params={"sample_sigma2_leaf": False}, +bcf_model = BCFModel() +bcf_model.sample( + X_train=X_train, + Z_train=Z_train, + y_train=y_train, + propensity_train=pi_train, + X_test=X_test, + Z_test=Z_test, + propensity_test=pi_test, + general_params={"num_threads": 1}, ) ``` @@ -650,8 +499,10 @@ bcf_model_warmstart.sample( ## R ```{r} +mu_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "prognostic_function") +tau_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "cate") plot( - rowMeans(bcf_model_warmstart$mu_hat_test), + rowMeans(mu_hat_test), mu_test, xlab = "predicted", ylab = "actual", @@ -659,20 +510,21 @@ plot( ) abline(0, 1, col = "red", lty = 3, lwd = 3) plot( - rowMeans(bcf_model_warmstart$tau_hat_test), + rowMeans(tau_hat_test), tau_test, xlab = "predicted", ylab = "actual", main = "Treatment effect" ) abline(0, 1, col = "red", lty = 3, lwd = 3) +sigma2_global_samples <- extractParameter(bcf_model, "sigma2_global") sigma_observed <- var(y - E_XZ) plot_bounds <- c( - min(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)), - max(c(bcf_model_warmstart$sigma2_global_samples, sigma_observed)) + min(c(sigma2_global_samples, sigma_observed)), + max(c(sigma2_global_samples, sigma_observed)) ) plot( - bcf_model_warmstart$sigma2_global_samples, + sigma2_global_samples, ylim = plot_bounds, ylab = "sigma^2", xlab = "Sample", @@ -684,155 +536,26 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python ```{python} +mu_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function") +tau_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="cate") sigma_observed = np.var(y - E_XZ) -mu_pred = bcf_model_warmstart.mu_hat_test.mean(axis=1) -lo, hi = min(mu_pred.min(), 
mu_test.min()), max(mu_pred.max(), mu_test.max()) -plt.scatter(mu_pred, mu_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") -plt.show() - -tau_pred = bcf_model_warmstart.tau_hat_test.mean(axis=1) -lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) -plt.scatter(tau_pred, tau_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") -plt.show() - -plt.plot(bcf_model_warmstart.global_var_samples) -plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) -plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") -plt.show() -``` - -:::: - -Examine test set interval coverage. - -::::{.panel-tabset group="language"} - -## R - -```{r} -test_lb <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.025) -test_ub <- apply(bcf_model_warmstart$tau_hat_test, 1, quantile, 0.975) -cover <- ((test_lb <= tau_x[test_inds]) & - (test_ub >= tau_x[test_inds])) -mean(cover) -``` - -## Python - -```{python} -test_lb = np.quantile(bcf_model_warmstart.tau_hat_test, 0.025, axis=1) -test_ub = np.quantile(bcf_model_warmstart.tau_hat_test, 0.975, axis=1) -cover = (test_lb <= tau_test) & (test_ub >= tau_test) -print(f"Coverage: {cover.mean():.3f}") -``` - -:::: - -#### BART MCMC without Warmstart - -::::{.panel-tabset group="language"} - -## R - -```{r} -num_gfr <- 0 -num_burnin <- 2000 -num_mcmc <- 100 -general_params <- list(keep_every = 5) -prognostic_forest_params <- list(sample_sigma2_leaf = F) -treatment_effect_forest_params <- list(sample_sigma2_leaf = F) -bcf_model_root <- bcf( - X_train = X_train, - Z_train = Z_train, - y_train = y_train, - propensity_train = pi_train, - X_test = X_test, - Z_test = Z_test, - propensity_test = pi_test, - num_gfr = num_gfr, - num_burnin = 
num_burnin, - num_mcmc = num_mcmc, - general_params = general_params, - prognostic_forest_params = prognostic_forest_params, - treatment_effect_forest_params = treatment_effect_forest_params -) -``` - -## Python - -```{python} -bcf_model_root = BCFModel() -bcf_model_root.sample( - X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, - X_test=X_test, Z_test=Z_test, propensity_test=pi_test, - num_gfr=0, num_burnin=2000, num_mcmc=100, - general_params={"num_threads": 1, "keep_every": 5}, - prognostic_forest_params={"sample_sigma2_leaf": False}, - treatment_effect_forest_params={"sample_sigma2_leaf": False}, -) -``` - -:::: - -::::{.panel-tabset group="language"} - -## R - -```{r} -plot( - rowMeans(bcf_model_root$mu_hat_test), - mu_test, - xlab = "predicted", - ylab = "actual", - main = "Prognostic function" -) -abline(0, 1, col = "red", lty = 3, lwd = 3) -plot( - rowMeans(bcf_model_root$tau_hat_test), - tau_test, - xlab = "predicted", - ylab = "actual", - main = "Treatment effect" -) -abline(0, 1, col = "red", lty = 3, lwd = 3) -sigma_observed <- var(y - E_XZ) -plot_bounds <- c( - min(c(bcf_model_root$sigma2_global_samples, sigma_observed)), - max(c(bcf_model_root$sigma2_global_samples, sigma_observed)) -) -plot( - bcf_model_root$sigma2_global_samples, - ylim = plot_bounds, - ylab = "sigma^2", - xlab = "Sample", - main = "Global variance parameter" -) -abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") -``` - -## Python - -```{python} -mu_pred = bcf_model_root.mu_hat_test.mean(axis=1) +mu_pred = mu_hat_test.mean(axis=1) lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) plt.scatter(mu_pred, mu_test, alpha=0.5) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") plt.show() -tau_pred = bcf_model_root.tau_hat_test.mean(axis=1) +tau_pred = tau_hat_test.mean(axis=1) lo, hi = min(tau_pred.min(), tau_test.min()), 
max(tau_pred.max(), tau_test.max()) plt.scatter(tau_pred, tau_test, alpha=0.5) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") plt.show() -plt.plot(bcf_model_root.global_var_samples) +sigma2_global_samples = bcf_model.extract_parameter("sigma2_global") +plt.plot(sigma2_global_samples) plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") plt.show() @@ -847,8 +570,8 @@ Examine test set interval coverage. ## R ```{r} -test_lb <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.025) -test_ub <- apply(bcf_model_root$tau_hat_test, 1, quantile, 0.975) +test_lb <- apply(tau_hat_test, 1, quantile, 0.025) +test_ub <- apply(tau_hat_test, 1, quantile, 0.975) cover <- ((test_lb <= tau_x[test_inds]) & (test_ub >= tau_x[test_inds])) mean(cover) @@ -857,8 +580,8 @@ mean(cover) ## Python ```{python} -test_lb = np.quantile(bcf_model_root.tau_hat_test, 0.025, axis=1) -test_ub = np.quantile(bcf_model_root.tau_hat_test, 0.975, axis=1) +test_lb = np.quantile(tau_hat_test, 0.025, axis=1) +test_ub = np.quantile(tau_hat_test, 0.975, axis=1) cover = (test_lb <= tau_test) & (test_ub >= tau_test) print(f"Coverage: {cover.mean():.3f}") ``` diff --git a/vignettes/custom-sampling.qmd b/vignettes/custom-sampling.qmd index 046d5bbf7..2dc5c3b8b 100644 --- a/vignettes/custom-sampling.qmd +++ b/vignettes/custom-sampling.qmd @@ -1,5 +1,5 @@ --- -title: "Custom Sampling Routine" +title: "Building a Custom Gibbs Sampler with Stochtree Primitives" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/ensemble-kernel.qmd b/vignettes/ensemble-kernel.qmd index 7c45aa8a3..eb3db0e42 100644 --- a/vignettes/ensemble-kernel.qmd +++ b/vignettes/ensemble-kernel.qmd @@ -1,5 +1,5 @@ --- -title: "Ensemble Kernel" +title: "Using Shared Leaf Membership 
as a Kernel" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/heteroskedastic.qmd b/vignettes/heteroskedastic.qmd index 6c1beacca..eb443a5cc 100644 --- a/vignettes/heteroskedastic.qmd +++ b/vignettes/heteroskedastic.qmd @@ -1,5 +1,5 @@ --- -title: "Heteroskedastic BART" +title: "BART with a Forest-based Variance Model" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/index.qmd b/vignettes/index.qmd index 3a9d83eb1..e8b63888e 100644 --- a/vignettes/index.qmd +++ b/vignettes/index.qmd @@ -10,33 +10,33 @@ R and Python implementations side-by-side. | Vignette | Description | |---|---| -| [BART](bart.qmd) | Bayesian Additive Regression Trees for supervised learning | -| [BCF](bcf.qmd) | Bayesian Causal Forests for heterogeneous treatment effects | -| [Heteroskedastic BART](heteroskedastic.qmd) | BART with a forest-modeled error variance | -| [Multi-Chain Inference](multi-chain.qmd) | Running and combining multiple MCMC chains | -| [Ordinal Outcome Modeling](ordinal-outcome.qmd) | BART for ordinal responses | -| [Multivariate Treatment BCF](multivariate-bcf.qmd) | BCF with vector-valued treatments | +| [BART](bart.qmd) | Bayesian Additive Regression Trees for Supervised Learning | +| [BCF](bcf.qmd) | Bayesian Causal Forests for Treatment Effect Estimation | +| [Heteroskedastic BART](heteroskedastic.qmd) | BART with a Forest-based Variance Model | +| [Ordinal Outcome Modeling](ordinal-outcome.qmd) | BART with the Complementary Log-Log Link for Ordinal Outcomes | +| [Multivariate Treatment BCF](multivariate-bcf.qmd) | BCF with Vector-valued Treatments | ## Practical Topics | Vignette | Description | |---|---| -| [Model Serialization](serialization.qmd) | Saving and loading fitted models | -| [Tree Inspection](tree-inspection.qmd) | Examining individual trees in a fitted ensemble | -| [Summary and Plotting](summary-plotting.qmd) | Posterior summary 
utilities | -| [Prior Calibration](prior-calibration.qmd) | Choosing and calibrating tree priors | -| [Scikit-Learn Interface](sklearn.qmd) | Using stochtree via sklearn-compatible estimators | +| [Model Serialization](serialization.qmd) | Saving and Loading Fitted Models | +| [Multi-Chain Inference](multi-chain.qmd) | Running and Combining Multiple MCMC Chains | +| [Tree Inspection](tree-inspection.qmd) | Examining Individual Trees in a Fitted Ensemble | +| [Summary and Plotting](summary-plotting.qmd) | Posterior Summary and Visualization Utilities | +| [Prior Calibration](prior-calibration.qmd) | Calibrating Leaf Node Scale Parameter Priors | +| [Scikit-Learn Interface](sklearn.qmd) | Using Stochtree via Sklearn-Compatible Estimators in Python | ## Low-Level Interface | Vignette | Description | |---|---| -| [Custom Sampling Routine](custom-sampling.qmd) | Building a custom Gibbs sampler with stochtree primitives | -| [Ensemble Kernel](ensemble-kernel.qmd) | Using the tree ensemble as a kernel | +| [Custom Sampling Routine](custom-sampling.qmd) | Building a Custom Gibbs Sampler with Stochtree Primitives | +| [Ensemble Kernel](ensemble-kernel.qmd) | Using Shared Leaf Membership as a Kernel | ## Advanced Methods | Vignette | Description | |---|---| -| [Regression Discontinuity Design](rdd.qmd) | BARDDT: leaf-regression BART for RDD | -| [Instrumental Variables](iv.qmd) | IV analysis via a custom monotone probit Gibbs sampler | +| [Regression Discontinuity Design](rdd.qmd) | BARDDT: Leaf-Regression BART for RDD | +| [Instrumental Variables](iv.qmd) | IV Analysis via a Custom Monotone Probit Gibbs Sampler | \ No newline at end of file diff --git a/vignettes/iv.qmd b/vignettes/iv.qmd index b41fb12c3..1f21c6c05 100644 --- a/vignettes/iv.qmd +++ b/vignettes/iv.qmd @@ -1,11 +1,5 @@ --- -title: "Instrumental Variables (IV) with stochtree" -author: - - name: P. 
Richard Hahn - affiliation: Arizona State University - - name: Drew Herren - affiliation: University of Texas at Austin -date: today +title: "Instrumental Variables (IV) with StochTree" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/multi-chain.qmd b/vignettes/multi-chain.qmd index 92b08d40e..beed218e4 100644 --- a/vignettes/multi-chain.qmd +++ b/vignettes/multi-chain.qmd @@ -1,5 +1,5 @@ --- -title: "Multi-Chain Inference" +title: "Running and Combining Multiple MCMC Chains" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/multivariate-bcf.qmd b/vignettes/multivariate-bcf.qmd index 60ac05fb3..90b07e2d7 100644 --- a/vignettes/multivariate-bcf.qmd +++ b/vignettes/multivariate-bcf.qmd @@ -1,5 +1,5 @@ --- -title: "Multivariate Treatment BCF" +title: "BCF with Vector-valued Treatments" execute: freeze: auto # re-render only when source changes --- diff --git a/vignettes/ordinal-outcome.qmd b/vignettes/ordinal-outcome.qmd index b92c34592..725a8f19f 100644 --- a/vignettes/ordinal-outcome.qmd +++ b/vignettes/ordinal-outcome.qmd @@ -1,5 +1,5 @@ --- -title: "Ordinal Outcome Modeling" +title: "BART with the Complementary Log-Log Link for Ordinal Outcomes" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/prior-calibration.qmd b/vignettes/prior-calibration.qmd index cb6be2cb2..09a4ea0e4 100644 --- a/vignettes/prior-calibration.qmd +++ b/vignettes/prior-calibration.qmd @@ -1,5 +1,5 @@ --- -title: "Prior Calibration" +title: "Calibrating Leaf Node Scale Parameter Priors" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/rdd.qmd b/vignettes/rdd.qmd index ba4c959f1..1a11604c8 100644 --- a/vignettes/rdd.qmd +++ b/vignettes/rdd.qmd @@ -1,13 +1,5 @@ --- -title: "Regression Discontinuity Design (RDD) with stochtree" -author: - - name: Rafael 
Alcantara - affiliation: University of Texas at Austin - - name: P. Richard Hahn - affiliation: Arizona State University - - name: Drew Herren - affiliation: University of Texas at Austin -date: today +title: "Regression Discontinuity Design (RDD) with StochTree" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/serialization.qmd b/vignettes/serialization.qmd index f92b4a5aa..e0d7bcd59 100644 --- a/vignettes/serialization.qmd +++ b/vignettes/serialization.qmd @@ -1,5 +1,5 @@ --- -title: "Model Serialization" +title: "Saving and Loading Fitted Models" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/sklearn.qmd b/vignettes/sklearn.qmd index 9b69116e1..839cbeab1 100644 --- a/vignettes/sklearn.qmd +++ b/vignettes/sklearn.qmd @@ -1,5 +1,5 @@ --- -title: "Scikit-Learn Interface" +title: "Using Stochtree via Sklearn-Compatible Estimators in Python" execute: freeze: auto # re-render only when source changes --- @@ -126,11 +126,13 @@ grid_search = GridSearchCV( param_grid=param_grid, cv=5, scoring="r2", - n_jobs=-1, + n_jobs=1, # n_jobs=-1 deadlocks when stochtree's C++ thread pool is active ) grid_search.fit(X, y) ``` +Note that we set `n_jobs=1` above to avoid deadlocks arising from interactions between `reticulate` (which renders these python vignettes), `joblib`, and stochtree's own C++ multithreading model. Users running this vignette interactively or as a script do not need to fix `n_jobs=1`. 
+ ```{python} cv_best_ind = np.argwhere(grid_search.cv_results_['rank_test_score'] == 1).item(0) best_num_gfr = grid_search.cv_results_['param_num_gfr'][cv_best_ind].item(0) diff --git a/vignettes/summary-plotting.qmd b/vignettes/summary-plotting.qmd index 0a8459e89..516ed1c89 100644 --- a/vignettes/summary-plotting.qmd +++ b/vignettes/summary-plotting.qmd @@ -1,5 +1,5 @@ --- -title: "Summary and Plotting" +title: "Posterior Summary and Visualization Utilities" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes diff --git a/vignettes/tree-inspection.qmd b/vignettes/tree-inspection.qmd index 2ade74b65..e7af380d8 100644 --- a/vignettes/tree-inspection.qmd +++ b/vignettes/tree-inspection.qmd @@ -1,5 +1,5 @@ --- -title: "Tree Inspection" +title: "Examining Individual Trees in a Fitted Ensemble" bibliography: vignettes.bib execute: freeze: auto # re-render only when source changes From b315e0e2d28613842bdd3beb3fed0e438f02c355 Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Thu, 26 Mar 2026 02:19:34 -0500 Subject: [PATCH 5/8] Overhaul doc site to use quarto instead of mkdocs --- .github/workflows/docs.yml | 42 +- .gitignore | 14 + .here | 0 README.md | 75 ++- _quarto.yml | 155 +++++ about.qmd | 105 ++++ assets/api.css | 37 ++ assets/custom.scss | 16 + development/contributing.qmd | 277 +++++++++ development/index.qmd | 9 + development/new-models.qmd | 273 +++++++++ development/roadmap.qmd | 23 + getting-started.qmd | 182 ++++++ index.qmd | 45 ++ vignettes/R/IV/IV_CDAG.png | Bin 0 -> 135771 bytes vignettes/R/RDD/RDD_DAG.png | Bin 0 -> 38039 bytes vignettes/R/RDD/trees1.png | Bin 0 -> 40369 bytes vignettes/R/RDD/trees2.png | Bin 0 -> 24832 bytes vignettes/R/RDD/trees3.png | Bin 0 -> 25988 bytes vignettes/bart.qmd | 153 ++++- vignettes/bcf.qmd | 439 +++++++++---- vignettes/custom-sampling.qmd | 1022 +++++++++++++++++++++++-------- vignettes/ensemble-kernel.qmd | 35 +- vignettes/heteroskedastic.qmd | 893 ++++++++++----------------- 
vignettes/iv.qmd | 2 +- vignettes/multi-chain.qmd | 2 +- vignettes/multivariate-bcf.qmd | 313 ++++++---- vignettes/ordinal-outcome.qmd | 2 +- vignettes/prior-calibration.qmd | 2 +- vignettes/rdd.qmd | 2 +- vignettes/serialization.qmd | 2 +- vignettes/sklearn.qmd | 2 +- vignettes/summary-plotting.qmd | 32 +- vignettes/tree-inspection.qmd | 2 +- vignettes/vignettes.bib | 8 + 35 files changed, 2993 insertions(+), 1171 deletions(-) create mode 100644 .here create mode 100644 _quarto.yml create mode 100644 about.qmd create mode 100644 assets/api.css create mode 100644 assets/custom.scss create mode 100644 development/contributing.qmd create mode 100644 development/index.qmd create mode 100644 development/new-models.qmd create mode 100644 development/roadmap.qmd create mode 100644 getting-started.qmd create mode 100644 index.qmd create mode 100644 vignettes/R/IV/IV_CDAG.png create mode 100644 vignettes/R/RDD/RDD_DAG.png create mode 100644 vignettes/R/RDD/trees1.png create mode 100644 vignettes/R/RDD/trees2.png create mode 100644 vignettes/R/RDD/trees3.png diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 1c6367d7e..f4fc96d22 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -61,6 +61,9 @@ jobs: pip install . cd .. + - name: Set RETICULATE_PYTHON for vignette rendering + run: echo "RETICULATE_PYTHON=$(which python)" >> $GITHUB_ENV + - name: Setup pandoc uses: r-lib/actions/setup-pandoc@v2 @@ -79,7 +82,7 @@ jobs: - name: Build R doc site run: | cd stochtree_repo - Rscript cran-bootstrap.R 1 1 1 + Rscript cran-bootstrap.R 0 1 0 cd .. mkdir -p docs/R_docs/pkgdown Rscript -e 'pkgdown::build_site_github_pages("stochtree_repo/stochtree_cran", dest_dir = "../../docs/R_docs/pkgdown", install = TRUE)' @@ -90,39 +93,22 @@ jobs: Rscript cran-cleanup.R cd .. 
- - name: Copy Jupyter notebook demos over to docs directory - run: | - cp stochtree_repo/demo/notebooks/supervised_learning.ipynb docs/python_docs/demo/supervised_learning.ipynb - cp stochtree_repo/demo/notebooks/causal_inference.ipynb docs/python_docs/demo/causal_inference.ipynb - cp stochtree_repo/demo/notebooks/heteroskedastic_supervised_learning.ipynb docs/python_docs/demo/heteroskedastic_supervised_learning.ipynb - cp stochtree_repo/demo/notebooks/multivariate_treatment_causal_inference.ipynb docs/python_docs/demo/multivariate_treatment_causal_inference.ipynb - cp stochtree_repo/demo/notebooks/serialization.ipynb docs/python_docs/demo/serialization.ipynb - cp stochtree_repo/demo/notebooks/tree_inspection.ipynb docs/python_docs/demo/tree_inspection.ipynb - cp stochtree_repo/demo/notebooks/summary.ipynb docs/python_docs/demo/summary.ipynb - cp stochtree_repo/demo/notebooks/ordinal_outcome.ipynb docs/python_docs/demo/ordinal_outcome.ipynb - cp stochtree_repo/demo/notebooks/prototype_interface.ipynb docs/python_docs/demo/prototype_interface.ipynb - cp stochtree_repo/demo/notebooks/sklearn_wrappers.ipynb docs/python_docs/demo/sklearn_wrappers.ipynb - cp stochtree_repo/demo/notebooks/multi_chain.ipynb docs/python_docs/demo/multi_chain.ipynb + - name: Install Quarto + uses: quarto-dev/quarto-actions/setup@v2 + + - name: Install quartodoc + run: pip install quartodoc "griffe<1.0" + + - name: Regenerate Python API reference pages + run: quartodoc build - - name: Copy static vignettes over to docs directory - run: | - cp vignettes/Python/RDD/rdd.html docs/vignettes/Python/rdd.html - cp vignettes/Python/RDD/RDD_DAG.png docs/vignettes/Python/RDD_DAG.png - cp vignettes/Python/RDD/trees1.png docs/vignettes/Python/trees1.png - cp vignettes/Python/RDD/trees2.png docs/vignettes/Python/trees2.png - cp vignettes/Python/RDD/trees3.png docs/vignettes/Python/trees3.png - cp vignettes/R/RDD/rdd.html docs/vignettes/R/rdd.html - cp vignettes/Python/IV/iv.html 
docs/vignettes/Python/iv.html - cp vignettes/Python/IV/IV_CDAG.png docs/vignettes/Python/IV_CDAG.png - cp vignettes/R/IV/iv.html docs/vignettes/R/iv.html - - name: Build the overall doc site run: | - mkdocs build + quarto render - name: Deploy to GitHub pages 🚀 if: github.event_name != 'pull_request' uses: JamesIves/github-pages-deploy-action@v4 with: branch: gh-pages - folder: site \ No newline at end of file + folder: _site \ No newline at end of file diff --git a/.gitignore b/.gitignore index a64416ef9..94c7985dd 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,16 @@ yarn-error.log* /site/ /docs/cpp_docs/doxygen/ /docs/R_docs/pkgdown/* +_site/ +_freeze/ + +# Quarto render artifacts (output-dir should be _site/, but these appear in-place too) +/*.html +/site_libs/ +/objects.json +/development/*.html +/python-api/**/*.html +/python-api/reference/*.qmd # Virtual environments /python_venv @@ -41,3 +51,7 @@ yarn-error.log* /venv .venv .Rproj.user + +/.quarto/ +**/*.quarto_ipynb +**/*.rmarkdown diff --git a/.here b/.here new file mode 100644 index 000000000..e69de29bb diff --git a/README.md b/README.md index e6208b9f0..cdf0ad8df 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,47 @@ Rscript -e 'install.packages(c("remotes", "devtools", "roxygen2", "ggplot2", "la Rscript -e 'remotes::install_github("StochasticTree/stochtree", ref = "r-dev")' ``` +#### Cloning the stochtree repo + +Building the R API docs (roxygen2 / pkgdown) and C++ API docs (Doxygen) requires the stochtree source. Clone it into `stochtree_repo/` at the repo root: + +```bash +git clone --recurse-submodules https://github.com/StochasticTree/stochtree.git stochtree_repo +``` + +#### Building the pkgdown site for the R API + +With the stochtree repo checked out and R dependencies installed (see above), run: + +```bash +cd stochtree_repo +Rscript cran-bootstrap.R 0 1 0 +cd .. 
+mkdir -p docs/R_docs/pkgdown +Rscript -e 'pkgdown::build_site_github_pages("stochtree_repo/stochtree_cran", dest_dir = "../../docs/R_docs/pkgdown", install = TRUE)' +cd stochtree_repo +Rscript cran-cleanup.R +cd .. +``` + +`cran-bootstrap.R 0 1 0` prepares a `stochtree_cran/` subdirectory with the pkgdown config but without vignette source (vignettes are served by the Quarto site instead). `cran-cleanup.R` removes that temporary directory when done. The output is written to `docs/R_docs/pkgdown/`. + +#### Building the Doxygen site for the C++ API + +With the stochtree repo checked out and Doxygen installed (`brew install doxygen` on macOS), run: + +```bash +sed -i '' 's|^OUTPUT_DIRECTORY *=.*|OUTPUT_DIRECTORY = ../docs/cpp_docs/|' stochtree_repo/Doxyfile +sed -i '' 's|^GENERATE_XML *=.*|GENERATE_XML = NO|' stochtree_repo/Doxyfile +sed -i '' 's|^GENERATE_HTML *=.*|GENERATE_HTML = YES|' stochtree_repo/Doxyfile +mkdir -p docs/cpp_docs/ +cd stochtree_repo +doxygen Doxyfile +cd .. +``` + +The output is written to `docs/cpp_docs/doxygen/`. + #### Building the vignettes with quarto The vignettes live in the `vignettes/` directory and are configured as a standalone Quarto website via `vignettes/_quarto.yml`. Each `.qmd` file uses `{.panel-tabset group="language"}` tabsets to present R and Python code side-by-side. Python cells are executed via `reticulate`; set the `RETICULATE_PYTHON` environment variable to point at your `.venv` interpreter if it isn't picked up automatically. @@ -59,25 +100,29 @@ quarto preview The rendered site is written to `vignettes/_site/`. Individual vignettes use `freeze: auto` in their frontmatter, so re-renders only re-execute cells whose source has changed. To force a full re-execution, delete `vignettes/_freeze/` before rendering. -### Building the doxygen site for the C++ API - -First, ensure that you have [doxygen](https://www.doxygen.nl/index.html) installed. 
-On MacOS, this can be [done via homebrew](https://formulae.brew.sh/formula/doxygen) (i.e. `brew install doxygen`). +#### Regenerating the Python API reference pages -Then, modify the `Doxyfile` to build the C++ documentation as desired and build the doxygen site +The `python-api/reference/*.qmd` files are generated by quartodoc from the stochtree package's docstrings. They are checked into the repo, so a normal `quarto render` will render whatever is already there. If you've updated docstrings in the stochtree Python package, regenerate them first: -```{bash} -sed -i '' 's|^OUTPUT_DIRECTORY *=.*|OUTPUT_DIRECTORY = ../docs/cpp_docs/|' stochtree_repo/Doxyfile -sed -i '' 's|^GENERATE_XML *=.*|GENERATE_XML = NO|' stochtree_repo/Doxyfile -sed -i '' 's|^GENERATE_HTML *=.*|GENERATE_HTML = YES|' stochtree_repo/Doxyfile -mkdir -p docs/cpp_docs/ -cd stochtree_repo -doxygen Doxyfile -cd .. +```bash +source .venv/bin/activate +quartodoc build ``` +This reads the `quartodoc:` block in `_quarto.yml` and writes updated `.qmd` files to `python-api/reference/`. Run it from the repo root. After regenerating, commit the updated `.qmd` files and run `quarto render` as normal. + +The CI workflow always runs `quartodoc build` before `quarto render` so the live site stays in sync with the latest package docstrings. + ### Building the overall website -The overall site is built and deployed via the GitHub Actions workflow in `.github/workflows/docs.yml`, which renders the Quarto vignettes, builds Doxygen (C++ API) and pkgdown (R API) docs, and publishes the result to the `gh-pages` branch. +The full site (vignettes + Python API reference + embedded pkgdown/Doxygen) is built from the repo root with: + +```bash +quarto render +``` + +This requires pkgdown and Doxygen output to already exist at `docs/R_docs/pkgdown/` and `docs/cpp_docs/doxygen/` respectively (the CI workflow builds these before running `quarto render`). 
For iterating on vignettes alone, the `cd vignettes && quarto render` workflow described above is faster. + +**Freeze cache note:** The vignette `.qmd` files use `freeze: auto`, so re-renders only re-execute cells whose source has changed. The freeze cache lives at `_freeze/vignettes/` (top-level render) or `vignettes/_freeze/` (standalone vignette render). If you switch between the two render modes, copy the cache to the appropriate location before rendering to avoid unnecessary re-execution. -To build and preview the vignette site locally, use `quarto preview` as described above. Full-site local builds (including embedded Doxygen / pkgdown output) are best done through the CI workflow. +The CI workflow (`.github/workflows/docs.yml`) handles the full build and deploys the output `_site/` directory to the `gh-pages` branch. diff --git a/_quarto.yml b/_quarto.yml new file mode 100644 index 000000000..da63788d9 --- /dev/null +++ b/_quarto.yml @@ -0,0 +1,155 @@ +project: + type: website + output-dir: _site + render: + - "*.qmd" + - "vignettes/*.qmd" + - "python-api/reference/*.qmd" + - "development/*.qmd" + resources: + - docs/R_docs/pkgdown/ + - docs/cpp_docs/doxygen/ + +website: + title: "StochTree" + site-url: "https://stochtree.ai/" + repo-url: "https://github.com/StochasticTree/stochtree" + repo-actions: [issue] + + navbar: + left: + - href: index.qmd + text: Home + - href: getting-started.qmd + text: Getting Started + - href: about.qmd + text: About + - text: R Package + href: docs/R_docs/pkgdown/index.html + - text: Python API + href: python-api/reference/index.qmd + - text: C++ API + href: docs/cpp_docs/doxygen/index.html + - text: Vignettes + href: vignettes/index.qmd + - text: Development + menu: + - text: Overview + href: development/index.qmd + - text: Contributing + href: development/contributing.qmd + - text: Adding New Models + href: development/new-models.qmd + - text: Roadmap + href: development/roadmap.qmd + + sidebar: + - id: python-api + title: 
"Python API" + style: docked + contents: + - python-api/reference/index.qmd + - section: "Core Models" + contents: + - python-api/reference/bart.BARTModel.qmd + - python-api/reference/bcf.BCFModel.qmd + - section: "Scikit-Learn Interface" + contents: + - python-api/reference/sklearn.StochTreeBARTRegressor.qmd + - python-api/reference/sklearn.StochTreeBARTBinaryClassifier.qmd + - section: "Low-Level API" + contents: + - python-api/reference/data.Dataset.qmd + - python-api/reference/data.Residual.qmd + - python-api/reference/forest.Forest.qmd + - python-api/reference/forest.ForestContainer.qmd + - python-api/reference/sampler.ForestSampler.qmd + - python-api/reference/sampler.GlobalVarianceModel.qmd + - python-api/reference/sampler.LeafVarianceModel.qmd + + - id: vignettes + title: "Vignettes" + style: docked + contents: + - vignettes/index.qmd + - section: "Core Models" + contents: + - vignettes/bart.qmd + - vignettes/bcf.qmd + - vignettes/heteroskedastic.qmd + - vignettes/ordinal-outcome.qmd + - vignettes/multivariate-bcf.qmd + - section: "Practical Topics" + contents: + - vignettes/serialization.qmd + - vignettes/multi-chain.qmd + - vignettes/tree-inspection.qmd + - vignettes/summary-plotting.qmd + - vignettes/prior-calibration.qmd + - vignettes/sklearn.qmd + - section: "Low-Level Interface" + contents: + - vignettes/custom-sampling.qmd + - vignettes/ensemble-kernel.qmd + - section: "Advanced Methods" + contents: + - vignettes/rdd.qmd + - vignettes/iv.qmd + +format: + html: + theme: [minty, assets/custom.scss] + css: assets/api.css + toc: true + toc-depth: 3 + grid: + body-width: 1000px + margin-width: 200px + +execute: + freeze: auto + +quartodoc: + package: stochtree + dir: python-api/reference + style: pkgdown + parser: numpy + render_interlinks: false + sections: + - title: Core Models + desc: High-level model interfaces for supervised learning and causal inference. 
+ contents: + - name: bart.BARTModel + member_order: source + - name: bcf.BCFModel + member_order: source + - title: Scikit-Learn Interface + desc: stochtree models wrapped as sklearn-compatible estimators. + contents: + - name: sklearn.StochTreeBARTRegressor + member_order: source + - name: sklearn.StochTreeBARTBinaryClassifier + member_order: source + - title: Low-Level API — Data + desc: Data structures for custom sampling workflows. + contents: + - name: data.Dataset + member_order: source + - name: data.Residual + member_order: source + - title: Low-Level API — Forest + desc: Forest containers and inspection. + contents: + - name: forest.Forest + member_order: source + - name: forest.ForestContainer + member_order: source + - title: Low-Level API — Samplers + desc: Sampler classes for building custom models. + contents: + - name: sampler.ForestSampler + member_order: source + - name: sampler.GlobalVarianceModel + member_order: source + - name: sampler.LeafVarianceModel + member_order: source diff --git a/about.qmd b/about.qmd new file mode 100644 index 000000000..9a8159ab5 --- /dev/null +++ b/about.qmd @@ -0,0 +1,105 @@ +--- +title: "Overview of Stochastic Tree Models" +--- + +Stochastic tree models are a powerful addition to your modeling toolkit. +As with many machine learning methods, understanding these models in depth is an involved task. + +There are many excellent published papers on stochastic tree models +(to name a few, the [original BART paper](https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full), +[the XBART paper](https://www.tandfonline.com/doi/full/10.1080/01621459.2021.1942012), +and [the BCF paper](https://projecteuclid.org/journals/bayesian-analysis/volume-15/issue-3/Bayesian-Regression-Tree-Models-for-Causal-Inference--Regularization-Confounding/10.1214/19-BA1195.full)). 
+Here, we aim to build up an abbreviated intuition for these models from their conceptually-simple building blocks. + +## Notation + +We're going to introduce some notation to make these concepts precise. +In a traditional supervised learning setting, we hope to predict some **outcome** from **features** in a training dataset. +We'll call the outcome $y$ and the features $X$. +Our goal is to come up with a function $f$ that predicts the outcome $y$ as well as possible from $X$ alone. + +## Decision Trees + +[Decision tree learning](https://en.wikipedia.org/wiki/Decision_tree_learning) is a simple machine learning method that +constructs a function $f$ from a series of conditional statements. Consider the tree below. + +```{mermaid} +stateDiagram-v2 + state split_one <<choice>> + state split_two <<choice>> + split_one --> split_two: if x1 <= 1 + split_one --> c : if x1 > 1 + split_two --> a: if x2 <= -2 + split_two --> b : if x2 > -2 +``` + +We evaluate two conditional statements (`X[,1] > 1` and `X[,2] > -2`), arranged in a tree-like sequence of branches, +which determine whether the model predicts `a`, `b`, or `c`. We could similarly express this tree in math notation as + +$$ +f(X_i) = \begin{cases} +a & ; \;\;\; X_{i,1} \leq 1, \;\; X_{i,2} \leq -2\\ +b & ; \;\;\; X_{i,1} \leq 1, \;\; X_{i,2} > -2\\ +c & ; \;\;\; X_{i,1} > 1 +\end{cases} +$$ + +We won't belabor the discussion of trees as there are many good textbooks and online articles on the topic, +but we'll close by noting that training decision trees introduces a delicate balance between +[overfitting and underfitting](https://en.wikipedia.org/wiki/Overfitting). +Simple trees like the one above do not capture much complexity in a dataset and may potentially be underfit +while deep, complex trees are vulnerable to overfitting and tend to have high variance. 
+
+## Boosted Decision Tree Ensembles
+
+One way to address the overfitting-underfitting tradeoff of decision trees is to build an "ensemble" of decision
+trees, so that the function $f$ is defined by a sum of $k$ individual decision trees $g_i$
+
+$$
+f(X_i) = g_1(X_i) + \dots + g_k(X_i)
+$$
+
+There are several ways to train an ensemble of decision trees (sometimes called "forests"), the most popular of which are [random forests](https://en.wikipedia.org/wiki/Random_forest) and
+[gradient boosting](https://en.wikipedia.org/wiki/Gradient_boosting). Their main difference is that random forests train
+all $k$ trees independently of one another, while boosting trains trees sequentially, so that tree $j$ depends on the result of training trees 1 through $j-1$.
+Libraries like [xgboost](https://xgboost.readthedocs.io/en/stable/) and [LightGBM](https://lightgbm.readthedocs.io/en/latest/) are popular examples of boosted tree ensembles.
+
+Tree ensembles often [outperform neural networks and other machine learning methods on tabular datasets](https://arxiv.org/abs/2207.08815),
+but classic tree ensemble methods return a single estimated function $f$, without expressing uncertainty around its estimates.
+
+## Stochastic Tree Ensembles
+
+[Stochastic](https://en.wikipedia.org/wiki/Stochastic) tree ensembles differ from their classical counterparts in their use of randomness in learning a function.
+Rather than returning a single "best" tree ensemble, stochastic tree ensembles return a range of tree ensembles that fit the data well.
+Mechanically, it's useful to think of "sampling" -- rather than "fitting" -- a stochastic tree ensemble model.
+
+Why is this useful? Suppose we've sampled $m$ forests. For each observation $i$, we obtain $m$ predictions: $[f_1(X_i), \dots, f_m(X_i)]$.
+From this "dataset" of predictions, we can compute summary statistics, where a mean or median would give something akin to the predictions of an xgboost or lightgbm model, +and the $\alpha$ and $1-\alpha$ quantiles give a [credible interval](https://en.wikipedia.org/wiki/Credible_interval). + +Rather than explain each of the models that `stochtree` supports in depth here, we provide a high-level overview, with pointers to the relevant literature. + +### Supervised Learning + +The [`bart`](docs/R_docs/pkgdown/reference/bart.html) R function and the [`BARTModel`](python-api/reference/bart.BARTModel.qmd) Python class are the primary interface for supervised +prediction tasks in `stochtree`. The primary references for these models are +[BART (Chipman, George, McCulloch 2010)](https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full) and +[XBART (He and Hahn 2021)](https://www.tandfonline.com/doi/full/10.1080/01621459.2021.1942012). + +In addition to the standard BART / XBART models, in which each tree's leaves return a constant prediction, `stochtree` also supports +arbitrary leaf regression on a user-provided basis (i.e. an expanded version of [Chipman et al 2002](https://link.springer.com/article/10.1023/A:1013916107446) and [Gramacy and Lee 2012](https://www.tandfonline.com/doi/abs/10.1198/016214508000000689)). + +### Causal Inference + +The [`bcf`](docs/R_docs/pkgdown/reference/bcf.html) R function and the [`BCFModel`](python-api/reference/bcf.BCFModel.qmd) Python class are the primary interface for causal effect +estimation in `stochtree`. The primary references for these models are +[BCF (Hahn, Murray, Carvalho 2021)](https://projecteuclid.org/journals/bayesian-analysis/volume-15/issue-3/Bayesian-Regression-Tree-Models-for-Causal-Inference--Regularization-Confounding/10.1214/19-BA1195.full) and +[XBCF (Krantsevich, He, Hahn 2022)](https://arxiv.org/abs/2209.06998). 
+ +### Additional Modeling Features + +Both the BART and BCF interfaces in `stochtree` support the following extensions: + +* Accelerated / "warm-start" sampling of forests (i.e. [He and Hahn 2021](https://www.tandfonline.com/doi/full/10.1080/01621459.2021.1942012)) +* Forest-based heteroskedasticity (i.e. [Murray 2021](https://www.tandfonline.com/doi/abs/10.1080/01621459.2020.1813587)) +* Additive random effects (i.e. [Gelman et al 2008](https://www.tandfonline.com/doi/abs/10.1198/106186008X287337)) diff --git a/assets/api.css b/assets/api.css new file mode 100644 index 000000000..e5b738be8 --- /dev/null +++ b/assets/api.css @@ -0,0 +1,37 @@ +/* ── Secondary nav bar (breadcrumb bar below main navbar) ──────────────── + The bar uses var(--bs-breadcrumb-bg) for its background, so we override + the variable rather than the property. + ──────────────────────────────────────────────────────────────────────── */ +:root { + --bs-breadcrumb-bg: #ffffff; +} + +/* ── Secondary nav bar: links (e.g. "Core Models") ────────────────────────── + Breadcrumb item links inherit the global link color (teal) which disappears + on the now-white breadcrumb bar. Override to the same dark slate as the text. + ──────────────────────────────────────────────────────────────────────────── */ +.quarto-secondary-nav a, +.breadcrumb-item a { + color: #2d3748 !important; +} + +/* ── Methods summary table ─────────────────────────────────────────────── + The methods table lives in
`section#methods`.
+   First column (method name): never wrap, pin a minimum width so long
+   names like `compute_posterior_interval` stay on one line.
+   ──────────────────────────────────────────────────────────────────────── */
+section#methods table.table td:first-child,
+section#methods table.table th:first-child {
+  min-width: 240px;
+  white-space: nowrap;
+}
+
+/* ── Parameter / Returns tables ──────────────────────────────────────────
+   These live in `.doc-section` containers.
+ Give the Name column a floor so it isn't crowded by Type/Description. + ──────────────────────────────────────────────────────────────────────── */ +.doc-section table.table td:first-child, +.doc-section table.table th:first-child { + min-width: 140px; + white-space: nowrap; +} diff --git a/assets/custom.scss b/assets/custom.scss new file mode 100644 index 000000000..4e818bc9d --- /dev/null +++ b/assets/custom.scss @@ -0,0 +1,16 @@ +/*-- scss:defaults --*/ + +// Replace minty's green primary with a teal-blue +$primary: #1a7a9c; + +// Breadcrumb bar: white background, dark text +$breadcrumb-bg: #ffffff; +$breadcrumb-color: #2d3748; +$breadcrumb-active-color: #2d3748; +$breadcrumb-divider-color: #2d3748; + +// Dark navbar +$navbar-bg: #2d3748; +$navbar-fg: #ffffff; +$navbar-hl: #7dd3e8; // lighter teal-blue for active/hover links + diff --git a/development/contributing.qmd b/development/contributing.qmd new file mode 100644 index 000000000..44b951a21 --- /dev/null +++ b/development/contributing.qmd @@ -0,0 +1,277 @@ +--- +title: "Contributing" +--- + +`stochtree` is hosted on [Github](https://github.com/StochasticTree/stochtree/). +Any feedback, requests, or bug reports can be submitted as [issues](https://github.com/StochasticTree/stochtree/issues). +Moreover, if you have ideas for how to improve stochtree, we welcome [pull requests](https://github.com/StochasticTree/stochtree/pulls). + +## Building StochTree + +Any local stochtree development will require cloning the repository from Github. +If you don't have git installed, you can do so following [these instructions](https://learn.microsoft.com/en-us/devops/develop/git/install-and-set-up-git). + +Once git is available at the command line, navigate to the folder that will store this project (in bash / zsh, this is done by running `cd` followed by the path to the directory). 
+Then, clone the `stochtree` repo as a subfolder by running + +```bash +git clone --recursive https://github.com/StochasticTree/stochtree.git +``` + +*NOTE*: this project incorporates several C++ dependencies as [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules), +which is why the `--recursive` flag is necessary. If you have already cloned the repo without the `--recursive` flag, +you can retrieve the submodules recursively by running `git submodule update --init --recursive` in the main repo directory. + +### R + +This section will detail how to use RStudio to build and make changes to stochtree. There are other tools that are useful for R +package development (for example, [Positron](https://github.com/posit-dev/positron), [VS Code](https://code.visualstudio.com/docs/languages/r), +and [ESS](https://ess.r-project.org/)), but we will focus on RStudio in this walkthrough. + +Once you've cloned the stochtree repository, follow these steps to build stochtree: + +1. [Create an RStudio project in the stochtree directory](https://support.posit.co/hc/en-us/articles/200526207-Using-RStudio-Projects) +2. [Build the package in RStudio](https://docs.posit.co/ide/user/ide/guide/pkg-devel/writing-packages.html#building-a-package) + +Note that due to the complicated folder structure of the stochtree repo, step 2 might not work out of the box on all platforms. +If stochtree fails to build, you can use the script that we use to create a CRAN-friendly stochtree R package directory, which +creates a `stochtree_cran` subdirectory of the stochtree folder and copies the relevant R package files into this subfolder. +You can run this script by entering `Rscript cran-bootstrap.R 1 1 1` in the terminal in RStudio. +Once you have a `stochtree_cran` subfolder, you can build stochtree using + +```r +devtools::install_local("stochtree_cran") +``` + +Since this is a temporary folder, you can clean it up by running `Rscript cran-cleanup.R` in the terminal in RStudio. 
+ +### Python + +Building and making changes to the python library is best done in an isolated virtual environment. There are many different ways of +managing virtual environments in Python, but here we focus on `conda` and `venv`. + +#### Conda + +Conda provides a straightforward experience in managing python dependencies, avoiding version conflicts / ABI issues / etc. + +To build stochtree using a `conda` based workflow, first create and activate a conda environment with the requisite dependencies + +```bash +conda create -n stochtree-dev -c conda-forge python=3.10 numpy scipy pytest pandas pybind11 scikit-learn matplotlib seaborn +conda activate stochtree-dev +pip install jupyterlab +``` + +Then install the package by navigating to the stochtree directory and running + +```bash +pip install . +``` + +Note that if you are making changes and finding that they aren't reflected after a reinstall of stochtree, you can +clear all of the python package build artifacts with + +```bash +rm -rf stochtree.egg-info; rm -rf .pytest_cache; rm -rf build +``` + +and then rerun `pip install .` + +#### Venv + +You could also use venv for environment management. First, navigate to the folder in which you usually store virtual environments +(i.e. `cd /path/to/envs`) and create and activate a virtual environment: + +```bash +python -m venv venv +source venv/bin/activate +``` + +Install all of the package (and demo notebook) dependencies + +```bash +pip install numpy scipy pytest pandas scikit-learn pybind11 matplotlib seaborn jupyterlab +``` + +Then install the package by navigating to the stochtree directory and running + +```bash +pip install . 
+``` + +Note that if you are making changes and finding that they aren't reflected after a reinstall of stochtree, you can +clear all of the python package development artifacts with + +```bash +rm -rf stochtree.egg-info; rm -rf .pytest_cache; rm -rf build +``` + +and then rerun `pip install .` + +### C++ + +#### CMake + +The C++ project can be built independently from the R / Python packages using `cmake`. +See [here](https://cmake.org/install/) for details on installing cmake (alternatively, +on MacOS, `cmake` can be installed using [homebrew](https://formulae.brew.sh/formula/cmake)). +Once `cmake` is installed, you can build the CLI by navigating to the main +project directory at your command line (i.e. `cd /path/to/stochtree`) and +running the following code + +```bash +rm -rf build +mkdir build +cmake -S . -B build +cmake --build build +``` + +The CMake build has two primary targets, which are detailed below. + +##### Debug Program + +`debug/api_debug.cpp` defines a standalone target that can be straightforwardly run with a debugger (i.e. `lldb`, `gdb`) +while making non-trivial changes to the C++ code. +This debugging program is compiled as part of the CMake build if the `BUILD_DEBUG_TARGETS` option in `CMakeLists.txt` is set to `ON`. + +Once the program has been built, it can be run from the command line via `./build/debugstochtree` or attached to a debugger +via `lldb ./build/debugstochtree` (clang) or `gdb ./build/debugstochtree` (gcc). + +##### Unit Tests + +We test `stochtree` using the [GoogleTest](https://google.github.io/googletest/) framework. +Unit tests are compiled into a single target as part of the CMake build if the `BUILD_TEST` option is set to `ON` +and the test suite can be run after compilation via `./build/teststochtree`. + +## Debugging + +Debugging stochtree invariably leads to the "core" C++ codebase, which requires care to debug correctly. 
+Below we detail how to debug stochtree's C++ core through each of the three interfaces (C++, R and Python). + +### C++ Program + +The `debugstochtree` cmake target exists precisely to quickly debug the C++ core of stochtree. + +First, you must build the program using debug symbols, which you can do by enabling the `USE_DEBUG` option +and ensuring that `BUILD_DEBUG_TARGETS` is also switched on, as below + +```bash +rm -rf build +mkdir build +cmake -S . -B build -DBUILD_DEBUG_TARGETS=ON -DUSE_DEBUG=ON +cmake --build build +``` + +From here, you can debug at the command line using [lldb](https://lldb.llvm.org/) on MacOS or [gdb](https://sourceware.org/gdb/) on Linux by running +either `lldb ./build/debugstochtree` or `gdb ./build/debugstochtree` and using the appropriate shortcuts to navigate your program. + +#### Xcode + +While using `gdb` or `lldb` on `debugstochtree` at the command line is very helpful, users may prefer debugging in a full-fledged IDE like Xcode (if working on MacOS). +This project's C++ core can be converted to an Xcode project from `CMakeLists.txt`, but first you must turn off sanitizers +(Xcode has its own way of setting this at build time, and having injected +`-fsanitize=address` statically into compiler arguments will cause errors). To do this, modify the `USE_SANITIZER` line in `CMakeLists.txt`: + +``` +option(USE_SANITIZER "Use santizer flags" OFF) +``` + +To generate an Xcode project, navigate to the main project folder and run: + +```bash +rm -rf xcode/ +mkdir xcode +cd xcode +cmake -G Xcode .. -DCMAKE_C_COMPILER=cc -DCMAKE_CXX_COMPILER=c++ -DUSE_SANITIZER=OFF -DUSE_DEBUG=OFF +cd .. +``` + +Now, if you navigate to the xcode subfolder (in Finder), you should be able to click on a `.xcodeproj` file and the project will open in Xcode. + +### R Package + +Debugging stochtree R code requires building the R package with debug symbols. 
+The simplest way to do this is to open your R installation's `Makevars` file +by running `usethis::edit_r_makevars()` in RStudio which will open `Makevars` +in a code editor. + +If your `Makevars` file already has a line that begins with `CXX17FLAGS = ...`, +look for a `-g -O2` compiler flag and change this to `-g -O0`. If this flag isn't +set in the `CXX17FLAGS = ` line, then simply add `-g -O0` to this line after the ` = `. +If your `Makevars` file does not have a line that begins with `CXX17FLAGS = ...`, +add `CXX17FLAGS = -g -O0`. + +Now, rebuild the R package as above. Save the R code you'd like to debug to an R script. +Suppose for the sake of illustration that the code you want to debug is saved in +`path/to/debug_script.R`. + +At the command line (either the terminal in RStudio or your local terminal program), +run `R -d lldb` if you are using MacOS (or `R -d gdb` if you are using Linux). + +Now, you'll see an lldb prompt which should look like below with a blinking cursor after it + +``` +(lldb) +``` + +From there, you can set breakpoints, either to specific lines of specific files like `b src/tree.cpp:2117` or to break whenever there is an error using `breakpoint set -E c++`. +(**Note**: in gdb, the breakpoint and control flow commands are slightly different, see [here](https://www.maths.ed.ac.uk/~swood34/RCdebug/RCdebug.html) for more detail on debugging R through `gdb`.) +Now, you can run R through the debugger by typing + +``` +r +``` + +This should load an R console, from which you can execute a script you've set up to run your code using + +```r +source("path/to/debug_script.R") +``` + +The code will either stop when it hits your first line-based breakpoint or when it runs into an error if you set the error-based breakpoint. +From there, you can navigate using `lldb` (or `gdb`) commands. + +**Note**: once you've loaded the R console, you can also simply interactively run commands that call stochtree's C++ code (i.e. 
running the `bart()` or `bcf()` functions). If you're debugging at this level, you probably have a specific problem in mind, and using a repeatable script will be worth your while, but it is not strictly necessary. + +### Python Package + +First, you need to build stochtree's C++ extension with debug symbols. +As always, start by navigating to the stochtree directory (i.e. `cd /path/to/stochtree/`) +and activating your development virtual environment (i.e. `conda activate [env_name]` or `source venv/bin/activate`). + +Since stochtree builds its C++ extension via cmake [following this example](https://github.com/pybind/cmake_example), +you'll need to ensure that the `self.debug` field in the `CMakeBuild` class is set to `True`. +You can do this by setting an environment variable of `DEBUG` equal to 1. +In bash, you can do this with `export DEBUG=1` at the command line. + +Once this is done, build the python library using + +```bash +pip install . +``` + +Suppose you'd like to debug stochtree through a script called `/path/to/script.py`. + +First, target a python process with `lldb` (or, alternatively, replace with `gdb` below if you use `gcc` as your compiler) via + +``` +lldb python +``` + +Now, you'll see an lldb (or gdb) prompt which should look like below with a blinking cursor after it + +``` +(lldb) +``` + +From there, you can set breakpoints, either to specific lines of specific files like `b src/tree.cpp:2117` or to break whenever there is an error using `breakpoint set -E c++`. +(If you're using `gdb`, see [here](https://lldb.llvm.org/use/map.html) for a comparison between lldb commands and gdb commands for setting breakpoints and navigating your program.) +Now you can run your python script through the debugger by typing + +``` +r /path/to/script.py +``` + +The program will run until the first breakpoint is hit, and at this point you can navigate using lldb (or gdb) commands. 
+ +**Note**: rather than running a script like `/path/to/script.py` above, you can also simply load the python console by typing `r` at the `(lldb)` terminal and then interactively run commands that call stochtree's C++ code (i.e. sampling `BARTModel` or `BCFModel` objects). If you're debugging at this level, you probably have a specific problem in mind, and using a repeatable script will be worth your while, but it is not strictly necessary. diff --git a/development/index.qmd b/development/index.qmd new file mode 100644 index 000000000..9a308254b --- /dev/null +++ b/development/index.qmd @@ -0,0 +1,9 @@ +--- +title: "Development" +--- + +`stochtree` is in active development. Here, we detail some aspects of the development process: + +* [Contributing](contributing.qmd): how to get involved with stochtree, by contributing code, documentation, or helpful feedback +* [Adding New Models](new-models.qmd): how to add a new outcome model in C++ and make it available through the R and Python frontends +* [Roadmap](roadmap.qmd): timelines for new feature development and releases diff --git a/development/new-models.qmd b/development/new-models.qmd new file mode 100644 index 000000000..90a6cda11 --- /dev/null +++ b/development/new-models.qmd @@ -0,0 +1,273 @@ +--- +title: "Adding New Models to stochtree" +--- + +While the process of working with `stochtree`'s codebase to add +functionality or fix bugs is covered in the [contributing](contributing.qmd) +page, this page discusses a specific type of contribution in detail: +contributing new models (i.e. likelihoods and leaf parameter priors). + +Our C++ core is designed to support any conditionally-conjugate model, but this flexibility requires some explanation in order to be easily modified. + +## Overview + +The key components of `stochtree`'s models are: + +1. A **SuffStat** class that stores and accumulates sufficient statistics +2. 
A **LeafModel** class that computes marginal likelihoods / posterior parameters and samples leaf node parameters + +Each model implements a different version of these two classes. For example, the "classic" +BART model with constant Gaussian leaves and a Gaussian likelihood is represented by the +`GaussianConstantSuffStat` and `GaussianConstantLeafModel` classes. + +Each class implements a common API, and we use a [factory pattern](https://en.wikipedia.org/wiki/Factory_(object-oriented_programming)) and the C++17 +[std::variant](https://www.cppreference.com/w/cpp/utility/variant.html) +feature to dispatch the correct model at runtime. +Finally, R and Python wrappers expose this flexibility through the BART / BCF interfaces. + +Adding a new leaf model thus requires implementing new `SuffStat` and `LeafModel` +classes, then updating the factory functions and R / Python logic. + +## SuffStat Class + +As a pattern, sufficient statistic classes end in `*SuffStat` and implement several methods: + +* `IncrementSuffStat`: Increment a model's sufficient statistics by one data observation +* `ResetSuffStat`: Reset a model's sufficient statistics to zero / empty +* `AddSuffStat`: Combine two sufficient statistics, storing their sum in the sufficient statistic object that calls this method (without modifying the supplied `SuffStat` objects) +* `SubtractSuffStat`: Same as above but subtracting the second `SuffStat` argument from the first, rather than adding +* `SampleGreaterThan`: Checks whether the current sample size of a `SuffStat` object is greater than some threshold +* `SampleGreaterThanEqual`: Checks whether the current sample size of a `SuffStat` object is greater than or equal to some threshold +* `SampleSize`: Returns the current sample size of a `SuffStat` object + +For the sake of illustration, imagine we are adding a model called `OurNewModel`. 
The new sufficient statistic class should look something like: + +```cpp +class OurNewModelSuffStat { + public: + data_size_t n; + // Custom sufficient statistics for `OurNewModel` + double stat1; + double stat2; + + OurNewModelSuffStat() { + n = 0; + stat1 = 0.0; + stat2 = 0.0; + } + + void IncrementSuffStat(ForestDataset& dataset, Eigen::VectorXd& outcome, + ForestTracker& tracker, data_size_t row_idx, int tree_idx) { + n += 1; + stat1 += /* accumulate from outcome, dataset, or tracker as needed */; + stat2 += /* accumulate from outcome, dataset, or tracker as needed */; + } + + void ResetSuffStat() { + n = 0; + stat1 = 0.0; + stat2 = 0.0; + } + + void AddSuffStat(OurNewModelSuffStat& lhs, OurNewModelSuffStat& rhs) { + n = lhs.n + rhs.n; + stat1 = lhs.stat1 + rhs.stat1; + stat2 = lhs.stat2 + rhs.stat2; + } + + void SubtractSuffStat(OurNewModelSuffStat& lhs, OurNewModelSuffStat& rhs) { + n = lhs.n - rhs.n; + stat1 = lhs.stat1 - rhs.stat1; + stat2 = lhs.stat2 - rhs.stat2; + } + + bool SampleGreaterThan(data_size_t threshold) { return n > threshold; } + bool SampleGreaterThanEqual(data_size_t threshold) { return n >= threshold; } + data_size_t SampleSize() { return n; } +}; +``` + +## LeafModel Class + +Leaf model classes end in `*LeafModel` and implement several methods: + +* `SplitLogMarginalLikelihood`: the log marginal likelihood of a potential split, as a function of the sufficient statistics for the newly proposed left and right node (i.e. 
ignoring data points unaffected by a split) +* `NoSplitLogMarginalLikelihood`: the log marginal likelihood of a node without splitting, as a function of the sufficient statistics for that node +* `SampleLeafParameters`: Sample the leaf node parameters for every leaf in a provided tree, according to this model's conditionally conjugate leaf node posterior +* `RequiresBasis`: Whether or not a model requires regressing on "basis functions" in the leaves + +As above, imagine that we are implementing a new model called `OurNewModel`. The new leaf model class should look something like: + +```cpp +class OurNewModelLeafModel { + public: + OurNewModelLeafModel(/* model parameters */) { + // Set model parameters + } + + double SplitLogMarginalLikelihood(OurNewModelSuffStat& left_stat, + OurNewModelSuffStat& right_stat, + double global_variance) { + double left_log_ml = /* calculate left node log ML */; + double right_log_ml = /* calculate right node log ML */; + return left_log_ml + right_log_ml; + } + + double NoSplitLogMarginalLikelihood(OurNewModelSuffStat& suff_stat, + double global_variance) { + double log_ml = /* calculate node log ML */; + return log_ml; + } + + void SampleLeafParameters(ForestDataset& dataset, ForestTracker& tracker, + ColumnVector& residual, Tree* tree, int tree_num, + double global_variance, std::mt19937& gen) { + // Sample parameters for every leaf in a tree, update `tree` directly + } + + inline bool RequiresBasis() { return /* true/false based on your model */; } + + // Helper methods below for `SampleLeafParameters`, which depend on the + // nature of the leaf model (i.e. location-scale, shape-scale, etc...) 
+
+  double PosteriorParameterMean(OurNewModelSuffStat& suff_stat,
+                                double global_variance) {
+    return /* calculate posterior mean */;
+  }
+
+  double PosteriorParameterVariance(OurNewModelSuffStat& suff_stat,
+                                    double global_variance) {
+    return /* calculate posterior variance */;
+  }
+
+ private:
+  // Leaf model parameters
+  double param1_;
+  double param2_;
+};
+```
+
+## Factory Functions
+
+Updating the factory pattern to be able to dispatch `OurNewModel` has several steps.
+
+First, we add our model to the `ModelType` enum in `include/stochtree/leaf_model.h`:
+
+```cpp
+enum ModelType {
+  kConstantLeafGaussian,
+  kUnivariateRegressionLeafGaussian,
+  kMultivariateRegressionLeafGaussian,
+  kLogLinearVariance,
+  kOurNewModel // New model
+};
+```
+
+Next, we add the `OurNewModelSuffStat` and `OurNewModelLeafModel` classes to the `std::variant` unions in `include/stochtree/leaf_model.h`:
+
+```cpp
+using SuffStatVariant = std::variant<GaussianConstantSuffStat, GaussianUnivariateRegressionSuffStat, GaussianMultivariateRegressionSuffStat, LogLinearVarianceSuffStat, OurNewModelSuffStat>; // New model
+
+using LeafModelVariant = std::variant<GaussianConstantLeafModel, GaussianUnivariateRegressionLeafModel, GaussianMultivariateRegressionLeafModel, LogLinearVarianceLeafModel, OurNewModelLeafModel>; // New model
+```
+
+Finally, we update the factory functions to dispatch the correct class from the union based on the `ModelType` integer code
+
+```cpp
+static inline SuffStatVariant suffStatFactory(ModelType model_type, int basis_dim = 0) {
+  if (model_type == kConstantLeafGaussian) {
+    return createSuffStat<GaussianConstantSuffStat>();
+  } else if (model_type == kUnivariateRegressionLeafGaussian) {
+    return createSuffStat<GaussianUnivariateRegressionSuffStat>();
+  } else if (model_type == kMultivariateRegressionLeafGaussian) {
+    return createSuffStat<GaussianMultivariateRegressionSuffStat>(basis_dim);
+  } else if (model_type == kLogLinearVariance) {
+    return createSuffStat<LogLinearVarianceSuffStat>();
+  } else if (model_type == kOurNewModel) { // New model
+    return createSuffStat<OurNewModelSuffStat>();
+  } else {
+    Log::Fatal("Incompatible model type provided to suff stat factory");
+  }
+}
+
+static inline LeafModelVariant leafModelFactory(ModelType model_type, double tau,
+                                                Eigen::MatrixXd& Sigma0, double a, double b) {
+  if (model_type == kConstantLeafGaussian) {
+    return createLeafModel<GaussianConstantLeafModel>(tau);
+  } else if (model_type == kUnivariateRegressionLeafGaussian) {
+    return createLeafModel<GaussianUnivariateRegressionLeafModel>(tau);
+  } else if (model_type == kMultivariateRegressionLeafGaussian) {
+    return createLeafModel<GaussianMultivariateRegressionLeafModel>(Sigma0);
+  } else if (model_type == kLogLinearVariance) {
+    return createLeafModel<LogLinearVarianceLeafModel>(a, b);
+  } else if (model_type == kOurNewModel) { // New model
+    return createLeafModel<OurNewModelLeafModel>(/* initializer values */);
+  } else {
+    Log::Fatal("Incompatible model type provided to leaf model factory");
+  }
+}
+```
+
+## R Wrapper
+
+To reflect this change through to the R interface, we first add the new model to the logic in the `sample_gfr_one_iteration_cpp`
+and `sample_mcmc_one_iteration_cpp` functions in the `src/sampler.cpp` file
+
+```cpp
+// Convert leaf model type to enum
+StochTree::ModelType model_type;
+if (leaf_model_int == 0) model_type = StochTree::ModelType::kConstantLeafGaussian;
+else if (leaf_model_int == 1) model_type = StochTree::ModelType::kUnivariateRegressionLeafGaussian;
+else if (leaf_model_int == 2) model_type = StochTree::ModelType::kMultivariateRegressionLeafGaussian;
+else if (leaf_model_int == 3) model_type = StochTree::ModelType::kLogLinearVariance;
+else if (leaf_model_int == 4) model_type = StochTree::ModelType::kOurNewModel; // New model
+else StochTree::Log::Fatal("Invalid model type");
+```
+
+Then we add the integer code for `OurNewModel` to the `leaf_model_type` field signature in `R/config.R`
+
+```r
+#' @field leaf_model_type Integer specifying the leaf model type (0 = constant leaf, 1 = univariate leaf regression, 2 = multivariate leaf regression, 3 = log-linear variance, 4 = your new model)
+leaf_model_type = NULL,
+```
+
+## Python Wrapper
+
+Python's C++ wrapper code contains similar logic to that of the `src/sampler.cpp` file in the R interface.
+Add the new model to the `SampleOneIteration` method of the `ForestSamplerCpp` class in the `src/py_stochtree.cpp` file.
+
+```cpp
+// Convert leaf model type to enum
+StochTree::ModelType model_type;
+if (leaf_model_int == 0) model_type = StochTree::ModelType::kConstantLeafGaussian;
+else if (leaf_model_int == 1) model_type = StochTree::ModelType::kUnivariateRegressionLeafGaussian;
+else if (leaf_model_int == 2) model_type = StochTree::ModelType::kMultivariateRegressionLeafGaussian;
+else if (leaf_model_int == 3) model_type = StochTree::ModelType::kLogLinearVariance;
+else if (leaf_model_int == 4) model_type = StochTree::ModelType::kOurNewModel; // New model
+else StochTree::Log::Fatal("Invalid model type");
+```
+
+And then add the integer code for your new model to the `leaf_model_type` documentation in `stochtree/config.py`.
+
+## Additional Considerations
+
+Some of the `SuffStat` and `LeafModel` classes currently supported by stochtree require extra initialization parameters.
+We support this via [variadic templates](https://en.cppreference.com/w/cpp/language/parameter_pack.html) in C++
+
+```cpp
+template <typename LeafModel, typename LeafSuffStat, typename... LeafSuffStatConstructorArgs>
+static inline void GFRSampleOneIter(TreeEnsemble& active_forest, ForestTracker& tracker, ForestContainer& forests, LeafModel& leaf_model, ForestDataset& dataset,
+                                    ColumnVector& residual, TreePrior& tree_prior, std::mt19937& gen, std::vector<double>& variable_weights,
+                                    std::vector<int>& sweep_update_indices, double global_variance, std::vector<FeatureType>& feature_types, int cutpoint_grid_size,
+                                    bool keep_forest, bool pre_initialized, bool backfitting, int num_features_subsample, LeafSuffStatConstructorArgs&... leaf_suff_stat_args)
diff --git a/development/roadmap.qmd b/development/roadmap.qmd new file mode 100644 index 000000000..f3715e5e5 --- /dev/null +++ b/development/roadmap.qmd @@ -0,0 +1,23 @@ +--- +title: "Development Roadmap" +--- + +We are working hard to make `stochtree` faster, easier to use, and more flexible! Below is a snapshot of our development roadmap. We categorize new product enhancements into four categories: + +1. **User Interface**: the way that a user can build, store, and use models +2. **Performance**: program runtime and memory usage of various models +3. **Modeling Features**: scope of modeling tools provided +4. **Interoperability**: compatibility with other computing and data libraries + +Our development goals are prioritized along three broad timelines + +1. **Now**: development is currently underway or planned for a near-term release +2. **Next**: design / research needed; development hinges on feasibility and time demands +3. **Later**: long-term goal; exploratory + +| Category | Now | Next | Later | +| --- | --- | --- | --- | +| User Interface | | | | +| Performance | | | Hardware acceleration (Apple Silicon GPU)
Hardware acceleration (NVIDIA GPU)
Out-of-memory sampler | +| Modeling Features | Quantile cutpoint sampling
Probit BART and BCF | Monotonicity constraints
Multiclass classification | | +| Interoperability | | | PyMC (Python)
Stan (R / Python)
Apache Arrow (R / Python)
Polars (Python) | diff --git a/getting-started.qmd b/getting-started.qmd new file mode 100644 index 000000000..f52461ef6 --- /dev/null +++ b/getting-started.qmd @@ -0,0 +1,182 @@ +--- +title: "Getting Started" +--- + +`stochtree` is composed of a C++ "core" and R / Python interfaces to that core. +Below, we detail how to install the R / Python packages, or work directly with the C++ codebase. + +## R Package + +### CRAN + +The R package can be installed from CRAN via + +```r +install.packages("stochtree") +``` + +### Development Version (Local Build) + +The development version of `stochtree` can be installed from Github via + +```r +remotes::install_github("StochasticTree/stochtree", ref="r-dev") +``` + +## Python Package + +### PyPI + +`stochtree`'s Python package can be installed from PyPI via + +```bash +pip install stochtree +``` + +### Development Version (Local Build) + +The development version of `stochtree` can be installed from source using pip's [git interface](https://pip.pypa.io/en/stable/topics/vcs-support/). +To proceed, you will need a working version of [git](https://git-scm.com) and python 3.8 or greater (available from several sources, one of the most +straightforward being the [anaconda](https://docs.conda.io/projects/conda/en/stable/user-guide/install/index.html) suite). + +#### Quick start + +Without worrying about virtual environments (detailed further below), `stochtree` can be installed from the command line + +```bash +pip install numpy scipy pytest pandas scikit-learn pybind11 +pip install git+https://github.com/StochasticTree/stochtree.git +``` + +#### Virtual environment installation + +Often, users prefer to manage different projects (with different package / python version requirements) in virtual environments. + +##### Conda + +Conda provides a straightforward experience in managing python dependencies, avoiding version conflicts / ABI issues / etc. 
+ +To build stochtree using a `conda` based workflow, first create and activate a conda environment with the requisite dependencies + +```bash +conda create -n stochtree-dev -c conda-forge python=3.10 numpy scipy pytest pandas pybind11 scikit-learn +conda activate stochtree-dev +``` + +Then install the package from github via pip + +```bash +pip install git+https://github.com/StochasticTree/stochtree.git +``` + +(*Note*: if you'd like to run `stochtree`'s notebook examples, you will also need `jupyterlab`, `seaborn`, and `matplotlib`) + +```bash +conda install matplotlib seaborn +pip install jupyterlab +``` + +##### Venv + +You could also use venv for environment management. First, navigate to the folder in which you usually store virtual environments +(i.e. `cd /path/to/envs`) and create and activate a virtual environment: + +```bash +python -m venv venv +source venv/bin/activate +``` + +Install all of the package (and demo notebook) dependencies + +```bash +pip install numpy scipy pytest pandas scikit-learn pybind11 +``` + +Then install stochtree via + +```bash +pip install git+https://github.com/StochasticTree/stochtree.git +``` + +As above, if you'd like to run the notebook examples, you will also need `jupyterlab`, `seaborn`, and `matplotlib`: + +```bash +pip install matplotlib seaborn jupyterlab +``` + +## C++ Core + +While the C++ core links to both R and Python for a performant, high-level interface, +the C++ code can also be unit-tested and compiled into a standalone +[debug program](https://github.com/StochasticTree/stochtree/tree/main/debug). + +### Compilation + +#### Cloning the Repository + +To clone the repository, you must have git installed, which you can do following [these instructions](https://learn.microsoft.com/en-us/devops/develop/git/install-and-set-up-git). 
+ +Once git is available at the command line, navigate to the folder that will store this project (in bash / zsh, this is done by running `cd` followed by the path to the directory). +Then, clone the `stochtree` repo as a subfolder by running + +```bash +git clone --recursive https://github.com/StochasticTree/stochtree.git +``` + +*NOTE*: this project incorporates several dependencies as [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules), +which is why the `--recursive` flag is necessary (some systems may perform a recursive clone without this flag, but +`--recursive` ensures this behavior on all platforms). If you have already cloned the repo without the `--recursive` flag, +you can retrieve the submodules recursively by running `git submodule update --init --recursive` in the main repo directory. + +#### CMake Build + +The C++ project can be built independently from the R / Python packages using `cmake`. +See [here](https://cmake.org/install/) for details on installing cmake (alternatively, +on MacOS, `cmake` can be installed using [homebrew](https://formulae.brew.sh/formula/cmake)). +Once `cmake` is installed, you can build the CLI by navigating to the main +project directory at your command line (i.e. `cd /path/to/stochtree`) and +running the following code + +```bash +rm -rf build +mkdir build +cmake -S . -B build +cmake --build build +``` + +The CMake build has two primary targets, which are detailed below. + +##### Debug Program + +`debug/api_debug.cpp` defines a standalone target that can be straightforwardly run with a debugger (i.e. `lldb`, `gdb`) +while making non-trivial changes to the C++ code. +This debugging program is compiled as part of the CMake build if the `BUILD_DEBUG_TARGETS` option in `CMakeLists.txt` is set to `ON`. + +Once the program has been built, it can be run from the command line via `./build/debugstochtree` or attached to a debugger +via `lldb ./build/debugstochtree` (clang) or `gdb ./build/debugstochtree` (gcc). 
+ +##### Unit Tests + +We test `stochtree` using the [GoogleTest](https://google.github.io/googletest/) framework. +Unit tests are compiled into a single target as part of the CMake build if the `BUILD_TEST` option is set to `ON` +and the test suite can be run after compilation via `./build/teststochtree`. + +### Xcode + +While using `gdb` or `lldb` on `debugstochtree` at the command line is very helpful, users may prefer debugging in a full-fledged IDE like Xcode. This project's C++ core can be converted to an Xcode project from `CMakeLists.txt`, but first you must turn off sanitizers. To do this, modify the `USE_SANITIZER` line in `CMakeLists.txt`: + +``` +option(USE_SANITIZER "Use santizer flags" OFF) +``` + +To generate an Xcode project, navigate to the main project folder and run: + +```bash +rm -rf xcode/ +mkdir xcode +cd xcode +cmake -G Xcode .. -DCMAKE_C_COMPILER=cc -DCMAKE_CXX_COMPILER=c++ -DUSE_SANITIZER=OFF -DUSE_DEBUG=OFF +cd .. +``` + +Now, if you navigate to the xcode subfolder (in Finder), you should be able to click on a `.xcodeproj` file and the project will open in Xcode. diff --git a/index.qmd b/index.qmd new file mode 100644 index 000000000..da5561fad --- /dev/null +++ b/index.qmd @@ -0,0 +1,45 @@ +--- +title: "StochTree" +--- + +`stochtree` (short for "stochastic trees") unlocks flexible decision tree modeling in R or Python. + +## What does the software do? + +Boosted decision tree models (like [xgboost](https://xgboost.readthedocs.io/en/stable/), +[LightGBM](https://lightgbm.readthedocs.io/en/latest/), or +[scikit-learn's HistGradientBoostingRegressor](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.HistGradientBoostingRegressor.html)) +are great, but often require time-consuming hyperparameter tuning. +`stochtree` can help you avoid this, by running a fast Bayesian analog of gradient boosting (called BART -- Bayesian Additive Regression Trees). + +`stochtree` has two primary interfaces: + +1. 
"High-level": robust implementations of many popular stochastic tree algorithms (BART, XBART, BCF, XBCF), with support for serialization and parallelism. +2. "Low-level": access to the "inner loop" of a forest sampler, allowing custom tree algorithm development in <50 lines of code. + +The "core" of the software is written in C++, but it provides R and Python APIs. +The R package is [available on CRAN](https://cran.r-project.org/web/packages/stochtree/index.html) and the Python package is [available on PyPI](https://pypi.org/project/stochtree/). + +## Why "stochastic" trees? + +"Stochastic" loosely means the same thing as "random." This naturally raises the question: how is `stochtree` different from a random forest library? +At a superficial level, both are decision tree ensembles that use randomness in training. + +The difference lies in how that "randomness" is deployed. +Random forests take random subsets of a training dataset, and then run a deterministic decision tree fitting algorithm ([recursive partitioning](https://en.wikipedia.org/wiki/Recursive_partitioning)). +Stochastic tree algorithms use randomness to construct decision tree ensembles from a fixed training dataset. + +The original stochastic tree model, [Bayesian Additive Regression Trees (BART)](https://projecteuclid.org/journals/annals-of-applied-statistics/volume-4/issue-1/BART-Bayesian-additive-regression-trees/10.1214/09-AOAS285.full), used [Markov Chain Monte Carlo (MCMC)](https://en.wikipedia.org/wiki/Markov_chain_Monte_Carlo) to sample forests from their posterior distribution. + +So why not call our project `bayesiantree`? + +Some algorithms implemented in `stochtree` are "quasi-Bayesian" in that they are inspired by a Bayesian model, but are sampled with fast algorithms that do not provide a valid Bayesian posterior distribution. + +Moreover, we think of stochastic forests as general-purpose modeling tools. 
+What makes them useful is their strong empirical performance -- especially on small or noisy datasets -- not their adherence to any statistical framework. + +So why not just call our project `decisiontree`? + +Put simply, the sampling approach is part of what makes BART and other `stochtree` algorithms work so well -- we know because we have tested out versions that did not do stochastic sampling of the tree fits. + +So we settled on the term "stochastic trees", or "stochtree" for short (pronounced "stoke-tree"). diff --git a/vignettes/R/IV/IV_CDAG.png b/vignettes/R/IV/IV_CDAG.png new file mode 100644 index 0000000000000000000000000000000000000000..7900ff501a89a14d1895ce6bc6a217ea24d6118d GIT binary patch literal 135771 zcmeFZ^;=cjyElx8n-W+w!lJuDi3QS)bc29MBNB`5u0^MUQUXd#s)R_VG)PLfpmgUu z)_&e+d(Ls6_YZil?=Rv7%3O2Id))V@#*9=~mB+y%$3j6t!BJF@(L_PH14Ti({T|~l z@SSCbqIB@(mW!sm6iV3u^alzG4T_?Sq_*dipPA^j#A8#R^voOsW9L$E>4I^FWqxX+ z`Ls}du9Q{zHmSrhTv*v|^E2|!t&Z?7dhMtrcfYXG$>M)|8hsN`UVAJE4RXQlE4p|x z!9TJ1v-+g3)aSiuoA;X6{0XV}_33ha3)3x31qoDwgg`U~n*aV+8g$eqb$DM;?&p8` z;s5zBbhqxvGY9|iKmXs~mq2mrPS=cjJ^lZ71&Nknq5pBw-ydZ~PNIcZFlAfeUq2l9 zcj|Cg-2dgHOQ6=_gO85$?qSlO_w|1+K?Kdb{cm@T{HcTk8WGe*`bXUVT#|o2-5vS$ z|Lqqu{O7j+S*HK~;Q#Zq|EQV&6#2jRmjCSSKU>d#TH2p$g6!b`>E!>t&HiVA{bz3f z4@mYOxbg>x{0~a|4@EAam}a4IXk5JBya@Vh$x?%3ur??<{em`xq9e;zM>O-90@ z54paWzajDTJ?Iw<;{5Kv7i|*y^SIaK`u=s}<$f>vb^qY%OGB>G+NG$ylNWkVgsvM$ z9OdySYB! 
zD$m0&-;(#SnTCh zj|!W%>-SDNRbI;^R$4}r>{&;Xa4xOe5fr`$WB>V9_cZsPvk#v@LffM41wsp-HZi-W zqkHSpXS~$F^g7ifyk^e#0ao12*vpb5jWnkV|dm|O5`%$+6Z#H!SuEL#)sN5fBlI&;##SG7s^`v^6Rcb_0-Cc(l*r}8!4b`FH?vo0eN`1I zrXz@OzEJ7MSRdu|S3dGj7(FQcG;N#XvOch9U7(uTI=xj&DDZPe=-U3*$BDS5JQY-a zv0x_bv09RKoRP=5g)bxBfV*ULZH zlfs)mG&m34{r%)tE+W5Id|u9E53Q7I&(_l0NKJh4qZ>Jj7=s{tTskRh{6?WLJf*0p zP|1P`Qbw3F$Mt{^tuvj}zGLeLe44Y&nS`F$SJ#biUF_x?{q#L%-K68GM03A8^>Wt4 zG|5XQ*>?~xH$Bt+%yn?M7yCj%qNS3+)Q9Dd;tNFkszwv(P6YQ_iezqWk0fkMpq0iF zy641~|8*o9`;}apw1`Y!UZK*}`BB?uNjcTyPj>e<$KO*3%({-UY`*i2{=v~pzwz!s z=9Hn-XI!nrCSUax!JuPnDCTh3*}%gcnMI-&R}Id{+*(Q}G3PRjnAv(aY~y;@$o2Xj zru6n}bO&_YS|$CMBIp{SO!ms*o*>Tq&2hd z-&xqU>$l1@p0} z5IvazeTT@nNYxDLmm%zCO9aKN%o1|3A`|oVbQO)G5*5ZOVMSkAYHjzJepWE_*<(lF zB3%9Xc?6_rx3*7fn397YXla&c8(oZl3TE9A%`x{LG`kegOo4`v1J{=gBKqzQjSe?RFq@2cg>{8$k!-3YggY)1M@j*P_$f3ZV= z0B92zziMtPKk-})lZyLlN4>#(M)`x@utfKLD{TanK)Nkmz@A)IKOL<&7<3;U^^>%J z(b0%77LMsdJxAUunl8OnTx`jaZu2{tC6yIuO|Ir#)ngEO>a*^I^o%go*g!kMDxt z-fJ4wV9WUjoI@l?r~W~7YAh{hYU|J1DJ|YWoW(7ozl} z>vdl)Q(X4?r8p{9+yL6YY{ueH2?9bBpI;w$zk;W9GUvk`0~NYoe=c_MvpPFWUF+~2 zvgNpKjhljcimoGf;@=m>Z8G7iF)FO?B5k2v_`aH?2IOVsm~q#_tO3c@4SyxS}MI`Bdq`2`GsR>kfF7K zObXc#-yNGomD6}=buFnLy4$6R2V6^yy}~5SpH#Xy{?(=-Pfa=g*rlIEc*$;sIj5bq zy3Ws6;5@_BW&FFI!Gfl^5i-KmficXtPqjNZi9NS4s%dw zJwqMLJr)6yknY;I&!nC1p>@(qhlheu5%z2}zXL}jiH?uauq%$Tts~Wl;5DBERJsha zyv#w?_44&ME>IL7pNss8{U2NiQ{l~H0tp-F11sgz!%?+1&YBql=>jJKO|tyA3dbcT zS8R4)j>dERB5-s3KD%pJ*iBVg-pzjEP`6radBb+_7B15IK$y~c;O;WBGRAUOEH-lT zfq|iybM7Y0T>H`fy8y|=kJ-;2Hs5NUCjCGEGUMMdBXwkVnTh(Lj}_O!H+`)0#~4+= zg9hnt1Xzv7_jFhP=ZaduxfT(Hl0du6j0chMkI%haIa*zDw>+}!d(c?u9=LOu{xEL^ z2!U{|?-gO&sFH5A1DNWQ&&$7t z*?s<_Ap82&D86as^NBKE<*UJfYZsfLH$kdhmJ7e+15|;dU_kpXLvI@mBPR>yWR@oC z}fkI53fa{aOCToI+gS+#3w;DcC){LB5-5z<5~JQx(CV2Yup zF8jP)uRp}_9X^FWZrJ31RWaCj5iaUJFM8s_a!l#J-^08J@G)MVd^|348E-p`CYei! 
z$$tW%n~l$51{Re-vc`J+s`}@Z9^A-1d|UF5GNK8}##pd}f=ykT<$H5=(v=Ylmd&=0 zJMHn-`1`yegz}c&nav$dtZ)zO%0PdMa4%TUmkF`g`*R8rtNaZE<3-MG((ZdX*Y0&vm~VI5 zwM|Vn1>6jMvi}ZWNt6%Qeg9bJkd^rAy6Jqggvau0%I-jS;-VU)T8;`c39r4{msVP<$f-kWH_2@5#Tkg$hUz;ALG) z+=R@kF}dbFf3rfU$Ba{sR)H$7a_Xb*PsNP8^EcOmzQWXvikq?5)jNwU_CCTJ2pn<5)0kKhhk3j3uHKJONgzdnM;B4Y$KWd+%<2nqlD7p zG`e6&Fpp$U9Ib@4h}mDd{4kBO{cH zRaYLfl`)knt5(kj0a z5z6pC{}E&qSA;JYo4l7Gjf+Y7(EaYiody@1$%-7bl6$wDg$G{qvxjSC`)t=~ctZJY zwP|mdmUhPt9Vw+t;zf@qS*XeZ4KzJlO)a+)-E%EQx~iK3bXkA)(>LUV5_avg3fppM z9V-dFSZd076+*lOo-5<$u)#U!@HR@k5IrJ-N!qo^>{o3*6WfB3MX^diy zEkwXRF2_BT)0;_@r@GhXWIvXUk~6m46(4WzJ#Jtv%lit4_kQfw9&-jn_-aL$v4-B< z-qZJpYNA-=;hnDHw>y=8HW&UFl-zGaInw52+dz!CCtdOkq1K-+h`05==1J|3{B)^2 zx!6{=l2U^1+cqWFXz=$0B;sH!@VKn?!IxtNJ2T>nG*A${S3vWi5}2|`-gsAPYw*ubNO=$(*(Vv3nb&W zpZ>sO)}FpNX&#Sh0)C@Zhb+b&gCe&7n+2$*gu$PdS||sRVMlWiud&@g7RO?aywjsw&d&J1Yuzonk!6q&}pyq&jvacg)$an;(fNd zyIrWUm>bQ7r47rcmHKEq#EQXU_y;I&9wB^l{|IaVLRHbDZ)p>b zGqp}(nHmghA4XN92Re;CX8B72|BQ-Nim!j{o3wkTCU(XU-cJn>c4aWXOQ2jOS+O>? 
z|LNjlnt2@N^RhZCns3ugK^$NLum9Twj%JjI>*Q`aXKN5LlOf73R2OxeYdH0vi3M26 zz4mZKDQ}?`NL`&$yq{jH1%6#dj)xbWCZNrbS!K$|x31!e9uDchYfwyP3W9s5BTMGr zasw%mC^|*RkHzDCxB+j&5hMc5WHDbQJsin!W}JIDHC(0aWI{iT`JxsnK#03QKe^gn z2*C>%a#jUPjy(?^nH*Yw@0qqC<;m+-fI(1~dx!(Yi-rPD}0Z@Afre zmF8XH7duS>IYZMb0#gj7HFMsJub-Di>O|Xrj7A|Y-U(O;;&JawOB;3C{8ki(n^aQ` z8o6oHt=p}0KEH^=tUA`F`LBCNC;-3AS-1Xk^LD{KlM|Okc1Edn%fxH-1rid$!Tq+QN+||*j!jls_QJnu$aj1lTXW`E zicI=ZADnmaZ%qXCmh<7FjSpycxfRIK);a$Sp{UK2|CnD0>Lj%3z!BFYtsp8SiX>IL zYw3=EK`W}Q`lB+LQfln{jzPUP8JXVn9 zZ3_bm(cc2txW0Mp5{el&@&B@~RKXq*!?>Y(z|`>2R=f6eDZ1@sK7d+06!8JjrHEL8 zX_gGnrN~M;UevemJuw#i{Z@wM&AGa)v%Hu@z(v9a9teWZL;PvdEV_0oDm@UlYl$kr zhx-qbppQVQJBL?jaQww={&j4k!nBR&X>;HZ=&108Aars8!_b4j_PkDa7fm-mJo{VF zXb!rKO$5mm1k&A5IRLpeYT0ye%~Pw9Jp%7v#tIR}jN8eq>FygzUZZ0`Be)voinI!L z*Tqe!Uh_SJp4QY+L_3Z^9^Z5{cr8{7x#W`h{dkwkMdYA{xVmSZfv+n3<89i`DEwk6 zoxZ3GP|1hK$439mF{6%MyyG0?8n3 z&SaT(`tO?;$5)upVAGm+>Y`j+ju($Jv_vX~Z_~jKR$IyY@=k z>tS|siW%yj2O<V>aUdZ5WLmK7LbKLkyv z&62xew>7QT>D>$6Bx8KSH=cdB@$TXP&Suzv9sr=~9dclcc*2$t^in{kSh8YQ1>3hn zX#4o_@0G+r?cq=Krz5XAm4&Z-yn{{B9uN=ZXY0E>9OXd{YKNIx7OwphButitC~h}h zY=53yUvX`J-`y$o8izMA`}zBix1!xKunkI#`)}uSyKM#vA*M%BeS{tX!3Ir zr)i-|ZC$qQANCm-X@WW+G@;)6D?N*!zB^#woKG}ib(q!u1^Vm*SI{6b+^3)Iom;az zplb65>=zZZ4xv+FI5wVHc><1yEddaBcE!yt^iX&E@8N)Hkq8#jZGQ1B1I%q^BA+uZ;&W)UeGAct8y+{>Gt6PiM!VdamcFg6qNic)LQ!0X8u{KOURi;EOt00Ie}%nA)Fg`^$P3kLT_J z>Wf&xIoRRGFg?I74}Ufs42-a8g2y8(yzBm3L(*nOHDIQvqeG=zT8y~4_E1*7SxHt4 zuX9-s2Xjg=ToEw72_64)L@XlVi}w*0=i?6;#-H7=w@SabKQ+yoNn*V*C%E75p}X3) zhJ+_dG|eB6k;I$3>C)NtJ)^uQ5I9?@Z%(^B-+#9)d7oSTU3UhpRxbdIFy7c66~S2| z?C!VY#X2Qq!A7t|Z{mk`GYbm}BBo?V0e0*l*3^Jsz#^dmF z^7LW!J!tewgj67djPR->$4*@VWRn4PNtZ$7VPxhr(=WA4_Zj0Ow(twgBXB8mt1E%j zrD4OsKta0l7Z6On-L^W1q@NUYw+e5SYR!!vUm+KrEw;J&VjJ6I-c9cmG)!O@5Mp)B z0vytY6p6HyX_#I$eHsaWJ57b~VZ=k9x&|c{&R$lVOQT+XHQFX_%d8|&V=%*?$L?LtlmT_R_wSpt?LHPTzj3~A_WGJq7kjH+cA#@C}aGskjT zBU7|dKBLi+eLfJ%U{F45T6rWo`c~%BkWjf zI$7beULlt$oHIcFyxm37i=o6;S)>e&L45|zbGMEE7jPr~#{3QAPAW36)-Nh#`~i8w 
z{1Y1KXjN!sP1on})fajsxdf*aE)hJJ$!cOka~`iA&TU0KpJRcMOn81b{0ABdkFD1kj+1F2WZur5?GUPplZVrl(VoMrem%uv(L1Y^7K|nt->A&G%nO}k$gq8cSKYFDAmM|*dNe$ zePW6N`e-LK1vblNw59o;GXzCa<1n+)>u7BB0u!KzU;?KpHfMaL&bDsOJLg$4lW9Ny z#9jOEpZEo*tL|oHzW^;NsyT&KE4TU*c;oSgwnlmGvu+cC<`;!|arZO4i#Eou@o*@P z^*vV-Bh0;1Jm$O#h8BTHF9P*iN!-g)S)_Fd7{9Jd_>~iE?i5?|1^mYU7h}FqMPXi0 z;%x5VFq%&lcWnN z_9L?L-B=g>dt*R^VDuCyjh`ej_dPJf&(4`@34y6x$~_Eeq9q$*X^0J0NhU7 zm*O!HJ7_{J<;~SjK*XTfjfipwiGFafGv^0 ztVMEnh;PbTL-8)_)-4A0L$oIV^6u{Ua?4k4fEmWQYXc-$*mBGRsos~IZRP$)B^9k$ z>_npMw=OeKhOww)s3^B7U_5v4-G93qp6Nc#Iyd13pZu)HO!YseF zSG&L$hyc@_d-$wds!=)uSHtM)mtz2hbXk$JJ;r0%LH!ee9~b&MGl5WA-jPCDg#XA7 ziQ0Xujj|i4g!D*c0~34&_F-1i`$%-iqFn7fRFMKy_d2sf0!!aL`WuL zp-_90<&%q@`3+K}!LbJTKpdU~$GOHw7Y1ToC9m+hfL(ZWgia`%i{ty^3#Jo6fnuE- zumbFXfI@0gQjr?HL5NiCnV%y;qW*U+Z4#-)Loa8Hz7l}Zp_T>rB@B?Caw5v$| zwHCx01SW8QO@G7&o?ZMvf(Uw-pP~dRHvXHhyGZMXnrN|8HI@$#Xn;|8p+wQe?jvt6 zSn23bh_K2M2SP5G>iF@gO4!hO!bTNngDGBe@?$kECava70g~kK;-ENF>V?1Sovugn z+qZt-!Y`V@H{CBo`J@nvjIy%_zzLMi) zWUaXSE{2h|0Wq1B^r%(}Q@kKpFuG^u>0$=ld%JcyP9tp=2Oq_!pshaT^He`vPO+ zhJJfypOq9&1Z}z!XvB?w;a|@Ur+UjmYfE$W&_{Ju)ZJh{ez%9r*nP?psk3tW#^g$a zju|_A7fjg3U~_|@@uU-0*mzRV2a4bO>po`1u2NjMXH;LkD-FSvaq1*zj3tKE>K2J#&0=%!TW7lI-mT29 z7#QI&Vt0x2Zjf5J0Iq4<#&F)U z^gZZe2I&&zGv-g*Qx7;!dQIepK|W?Y>KV(vFC@e>d_!TcdXJDPv`8zz{kZ^MEaVD8 zYTA0_6*7?7Ru2+2nHQ`LuUuBCdiXQ_;hfB=re4OmKO+W513DuvUo%pxzH^d9S#4~- zTKX`mh8IgNfW$K#8CtS&Cf@q)!?L7dE(1aZaF){qs?fc<&EgU|LBl-=04!ORp9=n2 zD;iSN9{W9zV+iZf%Ktkq5UxR=#b|vOT}sBnDF`+Wl%fr@8S{A2C9_0Xb$g8TCnRr( zCTI(mmAJnUVfXDm#8x7yHi(0_t|A^Bf;okYLvbBXZR&~@-+Ld`Ul)}?K?;B10}?ZZ zZ_?Q;zFs~LZc9|(`U-(M1-V>Bqb|qX`x{9BGv0LI=M=9v%@Tza&9>Y=sc$eY6%EeL>K1NbJNYW@tzW7#x> z9S!@u)A*1i%rr>1YSD&4kp3)P|FUA2MvY!IeuTu3cskOy%7vtcS(^MBEAKwxA-UKR7r2`4ePKOrXz z2YVJ<9q?Y&EFZO0^RU1$k054u-uXiZwrVQWxG<LxW+J34+P zJm(0eTB14mfC;uN;MLs1L%pLgT!*f_croF6IwUnFp@Q-Xpi_B1?R)PzOqTS_{2+UXXV!K7qIhmVMBaBwD?`+hr%cyEXDo=@dv^;Z`c?ofSLU!}TJ5 z>{~vpWFZ^rQU^kM=ScIt3q57^Rl22U{!U3F5OGR&COxhG*}`w8A)6qE+krc)h{fOjCs4h|);5`$=9@KWF`4!f| 
zWa?ay;rz&EE?0K)o#Q-~M_J0;?Rnr>YopzwI)NM3T+&p1 zoW)Lf3S3HPO8a!Ft{zK|SFRsDG3mqzM~uv*;wvc$$cT$ci>a=m=Te(lK9l-6vTI%T z_Ci;eVRaHKjlf49-A!4qR!qLbogZ16*VESy8*e%Rpl3;q(&rw(%tEYJ4`wFM0dGW2F z;K+ACJ~V3YX_!bPhE>m4gu-h!{bdGaFXqv8jB+tTd6Q}@Q(&}4uMd1Glrlto++_$Q zZ;4ZPpxS@i*>+!_ra7N&bmjnN#dcTv)V0TjMF<7z1A+#zLQMDsAcR;YzIN+GzqQWE zV98hQy=*i|C3OsTE}I>!W<_)LI%Xk_xG-72N9Y0f_2s&Fmsjwv0lrQb2-~5bBf;{@ zR|e7zpp6zT=&w0i{qoEUHvdH?@AEtTz!r)U-NKyZdq4cFiiYBWRo|f0C;iY$6yeRn zJk)JD65bwB4Sd`vtRFV~-=8^`R-CpkIs+cm0eN0*U@g6tG`a@_m}u3me!=e?;gZvU zf@~@UqGJK`fHPy^8_>@WfoO>50eP)iCKk$_F@Rw=wIfPLR_D|cskAKF7I<}nhqE~m zPg@<0Jz-3QPi0X-OJMA~*AZ5;@J71gZ@1X|eJ+}Od*zptxrg=VRYDX(_;Io7W0rZb zt>Kvup5$I3JHR$4bA3*v7KzcGd_pQrTwhj_YU7jE6IS~_lCQG{)0=ih*Q-{GcU}Uu z1Q#h8O07!;KtdmS%<+7%85UU!w#c6Hv2!k2au)?6^0JWAyaEpGOBb5Kl^y?+&scSO z2_8uzaeb?H4%Si!)ZQC8EL7Z&PTx1q>kir(0C$56TU7iv5w>N$WrPo7*S}k((A<)^ zkCo50{I<8zB`e@#50;wE7e)QJ1{sl>Uq$uq(j+Lrtg?3|SVLIHOfUsAl-KB%e z*6J092g|>ZIAU(*C!Cl)VU3uJ7w?1yBD{)Zt(SxEn06Lqhu^!G3TuVr-U}womQ0Xb zkgixfVG!6Co-m3^USKlWD`=>FUfSs0w;n~-<@QEoiDo?+O&)1Z7e^P$lJS(MT4!5Q za-IuhkGeNy%ix1dH*p%kXPolHp`thM$?& z>L{fa(qu9l(r}p+>7o8lL@LdGq%HV+5BLYdM82$N2KXZ*nRoNJ&b;P&uY&7ZrP<;n zv51^GnNK*3OncI<_u>O$E(|gbs!PrF_x*?pTJV*`tITs2YCp(4KVAn3@#9mk1pf!_{^GN)M~}h$WOmH^&-nMlu33aR7UJBexDP z`MfPw{tR{wDtwTa+a&U~tCTf*gWM2bX%j-JoGV5kdxgSH3CX}moCq$bH-2iLn`oJ<()|8jO)qxdW zHMN!=5zW=W7&x}uC_`&9$_wkIdZq1d)8FOr5{BFS>&TOLnC!G zwN$P{l1|DH+$oZYr)?puQtkJ&U~P!bnGK-ez%DXeau1TdURuXl&YT`n-x3YGbTLAc zZ^R>sEy~!^a1)9OHg~&+IVk;))UJISwP>0(T76%BF@?q8eR3Gr3-KQGFfCN{uYhA2 z;tVvpXQM_>gJ=Zvnum<1MNqoR*LaGEgk#lA;jpked2Ma zYg#b>`1cWd4*pvtPgymtfJsVBDr%sC?SaOH4Q1ib)+eHKt(;beX%79{}%Gl1JfFnoDz|92=H`Ko1Ic+Q5jwECT8H2o)_01E{+zBWj?GT%R9FAb#e>Ei%>54;-FO5H}q!7@&q-EC$lS`EVvdtS0o9_)XH{d97nx4S>67M zQ=3Vt6|A#nkbL1JdJ~MDyU^|5JJM7YCwRun4frXUYG(h#;PQFfJUYHea^s@u~3ekfYfPjxLPJHHsJs4{w!Y^{|(;a~JlHrwnp6Gd`L2Myk{gXT%oWSsIzxTE z8^How!dF~e!MfwCdE>cabd~L@7)4w+wYaGR$6kI<5!d`^`)j2U02Jfd@ zetal$%w}W88PqUJ*}RUxh$FICrZw3Tu$RircJ>?*Z0{`9Ps@nUz|Me=F0sipH{4ub 
zU|(1$bJy;+vcTv#)?e+U!fmNMTLjaOrCTcRH;s+0gFx+=HHQe{Z?tKNhH=z(D_AKs z3@`dc(m*Ftx8+h*Sc%t)2vg0%e!;foi}MhU^@G78-q)a3A0koZK#(D-E%{|-M5#^A zxW3)U!Ye{%oX0M!42|wqnHxgu$3^Q=oVpR!r%MnY;zHbvM+UrWI#&3+dbwXR_q?NB4yR_4%CeBGTy66I;)HT^&&NG?d| zJLECQLrjp(8CUVfd)lBSYrlH!rM*u}uq1GRt2#sdGt&EuV~a8XAwiy#Xztcu;>3xPEKhSX``NpK`NDLUwas`RYoZiQQ+c{#uE?V}F&Y+{ z-!jw5mvVI$psUbe6vpH+8OWxl{y+mGAn*4;>|mOa<`g0X`PwPV!eG5{1Y=9$(76BA zMZ}8!icdsE%8cvTQH9wt8r~-mC_J+croXa4dJ}AYi)p`fzJ@8nxXP3|192*y_TMVr z5JCgk|J=!vlw4=~~~Mw76HQcjE@ zP)86LNfXp;scbm3oxcE@#(diJ?>C{STcP$$!Ax62Wusy_npK5XQzSeoaqoo6k<94e zpX;ru68BITOlrPa8U@);r1?+kNrc%gtYrXCX8)PtG7#A+sX`zXWkL4K3K#8>F2X7` zv?mFVo4+-SREL(FHp{30Bg)s~l(Hpq%V=xdzBP*dJ()-4TdZMNRP{vrYlffnjkLRo zKJg-PMTy_JDKw2=8b$RArw|#PJw4pm{RaG0d1}885KR>uF7xEp60NWeBIiv1mm*@a zp4XuDhApum+W5|6ZsX^?@^`t(;|<4f(sr_1K3KKyll9a@$q%@>#x{;e9*?-w)e?f< zWt7D?ka@BUXZX7CapDR44sGwH{~C-{ps-#ALo{dEs~%u~PlEfn<}R?8UN*MBD8PdK z8k*+)XJc1bpmMnChbFT}L)wyJ?+6aau_2NyvUp1083YR95-SA`)nRkjv#hy7dCg>8 zQ7mQUNro}vR$OPQQ#xfi=7F{&BE?rnmCYG+g-YM%m7V(U!P83|N33s zJZ%+zRqR4q|6+IKUUMA8YM(w;9xlV8Z1^dpXE@94mSmQM z`{kJiHYEdPDQUx2c-x3&#Dt-IQ@f{@oT2KX-kSzQ6hYdwuSHyeX%MH%v;XUbE-kq# zg4IBh(U7(tc`~ox`zUu=P$yi~4pfB6<%+Fxxt>he#wJfzE&3FMq9`kaToqPEA(GJg z@e|1X=ym^~8@K`vQTzlabQ&YBLH9L6_hs^821so!k!gOJ{q&t=f0Do!ovRozQ{eR{5{)Aq5#>dHR-lEYX855Qp@F_aIO*39X- z4Q*=;Hz&%ek<$Lq7oU@~>JmDVbRDeb5IrB%qGI^5jFK+p07;DmTqcyRY4&rToNP;? zNHoBaEcAnB=c17mMr{y-Dp_&3dq;*F=VNe4jJ#t=Q>q~P%BKR>g6~0%Y5>k3w(95^ z6U!DiP?%q>>Y0S~0y#wgFf3t{;o36(#rwvF+KK7|mM_+d0J9p)!P^s4rHb*P0Vkj5 zJBa8IVT{*M)nX;}4Z=u|_y*N!d2gXT)!iWy8c!A`!kSbww?5spJHW6BgZv|n@hFlU(k>IR*y zm%GNuwc$nSq2VhLfYp*DI&L#zmA;iz5GoUfI>8_ZBvAbJLe%TM!ymu_;bPTB8(h0; z3ybJY8gk#3jS)0|nQ@$94vcivJHh!XndBYoO~4Yo%QZsE^a!SLo~Z2rf3I}`Ouom%I31}4(oop^(ijSTWk6PXfK~Dj~At7#3I;WL`^r>5uSx!2kWx( zTvA?GFPP$~vpI#h@l|WZXjABA81bD3zZ_%r2M~mN;-QX>V96?dzj=LGM|A!uCmt}U zrXk{XP8dCmj%d}`jl!DIi(HAsVG%8fJYbtaOQvrZtN}D#Oa;uQ-Ca~Zc6XOmyBH7CBJQJ8x~gB! 
zjC_y{(;j7BV_f$EJeOPh-D`9B?3z)X1AS=z4xt%s(S4Tb&P%SHBjGw){Jk@~X?a;u z84gz1eMg9%)3}LCKR;HS(t{k^U2D)Bb{D)he=lqUS|L=ikaJP5i#ov9mAFzrk5;Ub zhVJJl5YBN*OvsQ~6^+{|7e+bSw{5Mwm*&acBizIE zpu??UcuDhS_l#KY@sEJuVKpz(>VeMJ1D~n}va!ZMF5^ivXT)TOKFzLu)77yCO|K!G zL56ovmW;w7vU!#IbscTn07wdA}2~ z$7&98WgdLzT_OxSzZx&LDcD~ZymamPta-xihliO7o26PW zmubkB$wiS!t@KpgtE=Zk4;SQ*zApaM_$RR|@q+NF#^6%O9jZEtGf+weavOZn-Xs9E zeR{1#!8#(;Y=ef@KkjI->)w-|d!ewP1)W5%!Q$`Az4e4bH7jIF zUi%|wOxE;xJo&1bx*MI~v;x5sAepr87+J%4@~;@sDNxnd2Qsnk@QE-_3M$Z+lnG=! zGf&<{4Ufg@*~7AdnR5T~hgv|C>ci0DmGJMN3ARZ%?~D~`Ie5$_)zVZrH3-i8K8SMp z(L$O%1>)U#qgfaDhRpu zxqOQBnKr71qt;@=Zj7u%dg4L zn4PSbv_Y(0+J#t1DV_BC%O)lZO!tb+^RM3(--=l=Bp=%PEBtnli0#Uw-nGa_imUMt zw`zzu! z`{r^R9}fm9QDk`e&f_U~)8(DO`N75LIv4ZPtxN8v7&2tA2YFay7a%GbL0xByJwa%c zfd}81TakN)j3Jc+O#?%h8G(9Q$R$o9z2`$T3QvEXf|PlUq#lMnKX z5`A_|ZqD^wrGSr1WgYaUGX#$cvEb0RMlAyLn_PPQmREJ!kCz`uxCDSC@1Eo%g0nPv zzw^V$V0y1R$qU`rNxk^IJ0OLK;n;NLZaQ!fi1fp8kjJdz+DD{HrbxR$oF3h*P+(nh znqUOog5q*ST2$($I(Fy#vYH4!@|YiG>`b0k8)bJ0?RnrXZfX@`XQf+YtF+q87N_yE zZvY&F{w=|IO!|X5gp#?@^n6`Y#nn-HKZA z0#?-X0e!Fhe;&@$BFu#bHtNBmXJy{V zWs}*d?4r8kL5M5IE~8gI#b>j-03V$7=t3tV(pI!b+8)mYAhGyAziyVXzxSvmI04a_ zv@TvlmhizvIT;rAUy=&WY;^7qNbzywfkhF9)&;Z!S*P@j4uh7&Wg6`#en(@&I;Q|C z5E+F*1&Jr6TxC|w2dWwM9KULP-4VY$Y1$J;+AAGfcl^HkmlI%@Rr?BN`R?~*3-z9D z(`u_`3UFTj$c@f5e{dl*a5W9WdK4a8B;~V3X@;M^dOKUXIQYh6_OD6O329buo5@HF z5J&htned|$qp@3>TanbW)h^f`E77wVZ}8ffFE?@MVz>7{JK2;3aUSoO5bOWqyriJC z_8=v;Hga@aXs!AaRwxuUR|a0Dk|rK|sX8UA9F~>#u$%{#L(Bk;j>s6C_o?JTjh?ha zP)F|}kMKCm)_1%oqri5Oc>tp&f>0?Vn14dS$%dctRAf80*z_n|RxiD9CicielbxJCdeHxRn7C4_J=poUI)3!X$cn4R76wHvlxu8>t_SB zsL7;>sE_PeWp9airgdLsU5KnFb7Dm-DC!HE4uEvOl|_1LV2nb4IK|7G=Wjbj!$n>n zrp{z|39*&T3wA$vOoTiU6!w3Uc?5;4X!;qk;6)4Jh{eWc*~A%KjOb9kMHESRdxu$x zF`9nggJX`Tf1mq#oIMQ^QB()dD+}{sXq@GDY*gN)?Z@GJD{LGlw1h0@-s7cBKdCB> zD=FS^aW-P~7|qRy@tp_6`+WQJHWsYk2KG=%7cn5_sX&}tC#Q~q+NHD1q=H49JB1#{ z3U)^ahyJm7BapWiXHVv^L(p-ea$1UL6EdB@vhGm@8FF&BAkGxx4A6MnuSypXkXM?% zG$dB*Fc%@rxKLg^3IPtRRfG|vaaib)*2mX=KY@LwQQG@t-cwwPHm4buR=s4jDK5mJ 
zSP(cF)t$Qk0wj->c%d0035?POr2OO<;l5OHpugA`0yBXQJ$8%##Ec5^?kd%n{s*S_ z@y~nSc6F8;l^rC*n2(wus#BdY_Xx}CH;c)}5>(!`wB0H@K*V9xGg4;v-jDW7J8Ib& zUTh4y{r8JbKDSOaoQPe7x=k1`L$XYi>)@$j*7j$m;9WuY6fb^n5E98i9!?T%gi5G# za(eU~Ol(dPR)((iqczUQ?ILcqsdnJxQtMPj7Zo=0fc=EAhp}tZ+wi;>8D<>YJ*(Yj zJlv8w^;#>RTj=gp-cBW6x)be7@}uFU+v)#9)_=!S{r~^tI686=jvbD1>>^Ueu{rik zR%RKM8OJ7@V`Ptzz4s2GWY03Pq7uiJ%1ns(-p}*%>e=)C`&}-ce>|U;%ah0BocsMY zuD9#;di%ZQ)jE<$+{+)HwM?X#*w$l}kekOhXLlSjyt7@v2hRXa# z7A+Ceeuz)dg{!nqi;sg}f%mZ`l>BL(G@wc?Z&j8F{IC}=f<~LtX z=~|QP7!_LUo+;b`>+5uw%~hwAv{tdtK0LLFxZwHHvVOYmcASsSOoGapN*QuM=K zSC*2+%^otN=R#@XUJ#WDc67%wOByXRvGf{Z7^k}2PR5CzQS03{<>c8Uj>T?^i_C3k z;fG4E`(nL}2~}!l%;M2J^PuFoty!FFEF@yyyx4PG{LXEDKm?CvC4Y5J7oXV(YuL97 zq*m`DLvPk9dptsYydt6E{n(xcZ|RKS1keE1))Gd(wtc>Z6ZcdZwQrgKox@zgr-Mhv z)Kajrkc%H-QOIjNVR=Ebibh<-U!sRT7#4@>4iVkTpXNthndt1k{^_!pqD_`uq^geq zim$bDRRWIc0jH4Zv)oFVLgYQOp5MLUM_mLR5+acMy-$8ncLgu$sM)gV!w*e{QXvWY zbA*T9*uLsYfOcF4r$>U79uqF%Uf&r3_3@|q;g9N;5P zLBAQdAgeg)n4iH}wpeptVXpAm$kN*0Qa@tEr582Iyz74ZiURDIg;|te=4mTXX460B zO`f@&W`JJt21KTam26IS{PFsg(BQo+`Wf#%nWZQI(J?K1Wo`TB9Qr}DF}k9Vl_}mO zT!*CSbtUYL=8SGmV&(C+0lMng+V-zrt<#D)K{XJNvd4$|P#mq9N4EmE=U*2Fp;~Nn z@(svU4mg5BBVfvKrIsV3&Sg&xGWs_`_$Lvfk4(TnAN4!PSJ28(^bZC)oUCa$L;R-H z!IYkmB9#p_!F>bLI6qm+a7t_2(>NFgPssV~bYl+?9~hIDqZa5_8&O7VfhH9@!`m38 z(iuJN_SHK^h_!p1+swyb(XUDlSA!PJ_1y@NI$;;_iZkU3%AEZ z7g=xRvYdJMGXytYEEghbIrf5S{I|zgXcafK7v*(t*pme zi(872zUs82lv`r67NJMkh+6i;yFXJ_jZzcr;}mktYgBH@9O&be|7{OBE+(6b zeuZ57o^@HW{iH}dPgj`Yvfge8Wf@b z^ty9D0;FXEq{{r-nAIsm4hB6n+bdy0Mf8Eh17tX=ieJaXG>*Qmk0 z{=X-642li0s2AQlk1)*fSW6#o4UX*UhSnz%(reRYQRruhB>H~ORcv_(9m|xq(QaAa zdWG}pxw9c$KqLO~rz|$8ZlV3;{Z*irC|ZaImNk5zma0tjYx?^<+5p7+k8KX=AsF91 z&AjD$r{wy&QQm{8vO28n`ID135)3Yu+z}iLcNBAtx_(9r7KFM0Dt?UVP`^Xu6X=r_ z6HRUQy{n${7NIC!j=%xw)aIV@Ih|9vCQdoRRUEt`OC47h69XNX>&DMa(<7rtjH*>A z=`}{iZYvBCT<=u(QB3c#t69!aP}E>&pAp}qqhOIj2H5EC@<0T;8*+Wt();K{)eS0J zmD_;Ocidp`kTT81)wpEy8=y#!t(jrFj&+nWw&`YcUFCmnPCMi;=^^=B7pT$m)_4H}su;OkzP>^~EyuQ5K&MBRiY_{iF-F1v%0Dndh`wWt$5 
zpG7)dHx&G~Y53gcspwME_BJq~an}5v_-`E&X+`40q|&~xeJOp^u(*G1CQU{TS{dlm8bjOa(i&qD3h_E2Z#k(_Man)_^-G>cKKZrNl5z`;3HpK zQkuEOZxTPbH=IfD)Ox&j1PcmReU5DAvdqAwl=ORv(M}GoC0E5AbG>~F+G}Zic!0ix zjf}3pII%hnymhA_8r1+C0zyRvyClAD;uPlYmK-GyqD8r`4!zEVD7VvR(H{yyL?CMI z)|i{gVhK}}QjUeiS$#CZ@<*n+%d?R_QZ0phdm9uoktf}jG2R2~Ln?#e?^LIMCP|ff zYh8AG@zLK{ajY+!emMyC<{t+^-FtJdY!bn#B$mj8nowLF z7pm3WO38%tJ*dc?&5=(5RN5-4Z!G$MQx!ixoeKXPlnIE&d`Y5};S9h_Ejs^!ex=z{ zy$Y+b;n#P#@af(~jzQ6Yd#`8qY&omTzdPg@I8IRDq<`)u*np9@efsJC+D<4lx$PJ@ z!`g5TGtQ4GTOR21-m{0rOpH>D#}_cX+InG%tMnJYM>0Vfp`6J4BcRrUZCeKcf`EZl*5>u}jWIu>TuegU4jjo-l`P}8{g zMksBi0@xdvhew*4)e2{{{r5Z}r7lWn8r>k-g z7btqAtUr+S1CEEytXIeHZX79_SwTbyB9K{0NsmA+OpwIdJua$f)jyU29}j86MarcM z-3_uQ{B4I@uUr<*pQD6Z^)y~KfiYNrYZxpE9v#^MKJy7d8(J?W8&>wu)y?yal`1^9 z;a)6p?JUxdd)?tawDHFJUc}4~qm``R-<5Q?U66^oi!8i zGcXpZrs;{=uNrM@aa&~3+(K=2ePPr|l}ai;sFwJh-74QAM(D+xpit&*-(pejG+%ma z0rVHqKFg~3T@&?*7`)7o#;tZ{2BfmS^Iw9gKI27?t6mU9` z+d37t;eB<1W}1%iGQVuSff9n$oZB5|$`;>EA4Xx6HS-j}U_RKj;bVTOpR;vG(%wLH zZSAo|W7yj0uV&zOd{&pBP3Drkec4GpAeDyh7_P0<%gBggC$9#b<9wc8DbR*m_K}Qh zNSXvV8AErZt$;JpSAsCPBe5;v;9{J6|nWM==+yliQYR0`CdoJp|3tPuY zxbK*`D7hnf=*7!ns!!y8$UjoGA+Ww`%cJrQAXx^Tibx+^4Ye`&|F6gmHc`iOc`*D; zXnfpbLXC%ZZ@PYvohD9c%Gf5Gwx??EO_zbj?f08biP4wVDM;^ST`6B=N9h!9;O|GE z`!goc4-t7wyzFN1d`p);p41n&put=nVpn9ZvJ>TJ`;8@)1*1Q`x|OSA6DzTs06;!n zHYp~=^OXR!(}V0gP`;GKUK10#fErIK3zq%Ytw6#0@DH^~vy=H+*P4Xz z{lSEzo#j+hrz=RsFBd4>Va$TBzCGtHc>hD0gt#KW?Q^jzxsR+K$7ulYYp|MjV&`-0 zQv-Mda^Uk_jsN)o=x7kvA^i@TX1dD~IeGx!=pHX!DD3%&=qE*?Ct)--WTv}RY_*q} zXziSV(PqfaS};iwa!m5+K1C22c|3}wLXx_CzzSrE0%xLL>iB`Shd2)*%lzE+W1dnD za;2<0<%sKh%smHTp?0mSY4L&fQ+1Y_=A8?+kx}~P*EEc5bJoANbL!kBFk^z2p8>_8 zRsGW`9I(IPH~h-P!)Hve_R#G&+i6A!Pth|rsb>yv!MLPg@|D5Y_xaPmzN!S%M(`|r z{_$JslG3~TtdY)Uda}Qcmal&sS1DAW9=Jy(b37bO#9}mEsrap&GuUs-4qj;4#N zRthVVBaF&#H%8FVDC#{mEGm6I_G#IG1qUQse^30kij)hZz6_@NGzmt+(g3jx>m#or z0me?v_Oy?kUVF&nmG4mR;Y}yuk#mc}HIeDM2lgO?*y!JHKgsb$eU|X3sD2ywPN`x; zJ3~tR5{ob|oh71;!b{g=Wt}XyEfq{#FX8NHMlzduzqLDkmJk|%3sh85^X(2x(0b6V 
zOJCipWrqU2+W=#nY{jcmcl<~dFkW}JoqX|#KTrR*z(CZ7-jM$Uk87JQO}NbX`V^Y8 z#7Y|@yR1FuT3L+>{dHiC!CP>fS3uCCIw?QlY%e#U#kQtuULuIkr_xc+ny~p7erJjW z1{@7&iG`60wq2*Im-h=Jc3T2m0b-ot!s%;nI@ZdV<2#oIfH)=m_KWy`qckDDBAwaA z)Ad|J9nW3G%j?JVX0QyAM~n&*4O>+OLDoFqi(5pO~kXH4_E+_Tc*)4Q$m#V@0y>NetYg{_! z#0eSJ-IynqD}!DI-cM18F!vEO6oKx z*s|uDi_NFq=O|9e3+y=gyxr{_c=9P@7Fc1Z0ByN=xBHzxpM)yV71zlv$)4g&)AA;+ zM+Z0BvOgsqU~K&bJ)W{OFeudj9e1)(XSui-!4kD3ywy21sNnqtl=9rMOe&W75+mtP zImqNO3Aad^IfaS}h*<$!vR++8pjiL5#FNITSX!-x-*>q2+wVl4)VA%}&pv84FLXnS zY+a#AdkLzUCFx>rv907Ew1)^E`tn9iTlb+Lr(kujgC&AC$hB*y=<>Q^rJMeDllEf?g?+9K zjYL#ZI3Z0dk{fE-`@grY=aAYJsyo;NH-smBVb3I`AO9$uuwRQ=3~_EJUU=v#BJ<;2 zs7rn3NTMO7pZXSc{VvX1ZAsn`T@_>Pdx=<|8-MTe^Tl5Zwmi445^%iVbn?!szMBf*9_UVNK{ZF7iGiksM zku#Cg?Ag>qyT6D78u&j_STOXI5`Y_DPfeYPhx^f7u(X^7gRHubo0SnSfI;xpk(Lj< zMY>E(0xTHF9=g6bJW|e9CVzT3w_^DCNyAlxK>v*{qQc3j*0@Mbg-dMCCJ!3V0DtSV z*MhYBmkBN>QQ|320{yZQs{`P^+XIXT@3o}rs;&aJv?s?<3YnDzom!{QjPW79#0Fcf zIdj0}VfW*+8rgBX*B0_^TRS?@{r-f_>*vOqzcg3=?VkA`dt7= zG<@qG!YtwRp?koQN3xyF2&v$$ln*KG&h& z*=66N&U#uQGdz$~u?6oQ+E*(ws-1?eoYUZ)H*%8^yjjzCZt9^AE0aWP$N^PIWNi+mX^6|X_SDfho00nY2z zYnsHFl81uRsB)}=;t=K;hq6{7 zXMG>_3C5zur;`^Ho+$R{Juo|Uay+Od95}>4c#$#)-uBo(CnE?Vm5v7~Oyp3JpgQR> z2kws-N2*004qrBhq736B zq18&jOcna&=6;;Gl8ky=fqdj83W_^-sO~N=V~Vi0#eC3#Jx<8*A$cpDh>Wp_eR8|w zeCvoe3m5AqzgE)LCUTz(-gT+&I^d}BJv-)9TRC3OnS>JPp}(okKK|s3joh9pPp&d; z*52okbn^xdEyX_3bh>30^1(-g$^>k3U5V@X#0hx+`|%dS=MSobF-o|XU1F6n`+Uy` zBF7)PTx8W3wW(NV;$e^Rl)oRM8cx7I$hDd^%Ni}S!Yusc7vge=%G||ZGAHp!U04}R z+2(5f((Bn<03a|xDo+*JUsU995?vzUg<3oA$@}V3q-C z@Y$^u(XgAumWRk&b!bs?H7GQmifSoGP`PF6U4H1=i~Ah-uV9^_=|E*f8dbq>*76Q9 zm+5fdqQ-3EYsVL!sx{Zfw{ThetQO^^$5H3^upf!K#^;>1HiT;;s!f_Ee7U0gW`D;V zFC1M#?wPUSQ~mi7K8B(VW(PA{b4MSgjS_Vx~!z&$+Qu6rwkU{8A_DN8ab990R{{ zChoQ=Q?2`GYA55mWk?h;S}-3PWCyFdErlOOp(%6Y*7-{dmrL^FNQQHo#8+NhWw3ak zu$>n7FS=yOilN^A*iL*})5Rcn&s#E%RWQ9@2VZ%Bxz3(D%{Nx!mKZ#iH(8Qi+M{~m>e&IU@k6ib>Y`LIR6wh_mOA$P%w0?w|ku8DSk z-)2LGfES60^6(g>zscjn%>%im$9g-2w5d$Oj=Y3>>wyJ30!T67jKmFPf{Qvn 
zi_(SVX@&+xFIc-6)R>8D?Dk$p7{)V+X@{9b&X!nK0q?p7HWdbVyrC_6li#%6Oj|{9 z1I=crfW~ZG>SD;BK`Wv1FqiGJ61)Y^Viyj#KARG%M8+Fx7+8dfuU$5~g||T6@)66e zVhNLahNfF@c<`26uS9d)fwNad|Ix$!0D@=ME5Ck&iN=riSHKxU>vix&FYECFrDFYZ z-HDy)JH-L7AJ5BL#B~;f`0$d3$4mKdhz-42k{e1D# zuRr8)2Ugnlb(0OP2M9u9;k7C2Y3G51319~Q_!y0Pz*~oz>l$9)cx{8X<_SjY z8TJ>BlM*{7g^}}!tILZxa%O2wU}X#TwO7AXEzqw?J8RE!{2L#rysZd2hI0ii-PS91 z!cWz?oy~Wb`b6jv-E(z#jS~eFi)O%oEk8;YeeYSt8(=N>-YF?XsHarQ2x8GN9<`qZLz}!Fw8J$i{_1sG@_)H{+htp?#NsMH0!b&4&_6qeGYF=~*r< zW;}5a>MQHb$UkBw5^!GG5H={MRI@DLL2oNi)&r6|DU0gjIX$}Jd17Y|R0qA{Bmd_u zX~EkX0nc8k8m>giNQVN_f$&EA zX?63+L9U-wWYK&pJ16_`vcfjB7*T2#TakLo7#%3y@%1~kcdLC!w0wc??>?Y2Eq(pU z2c2wQn$10cFRrV0jzV^HxIyXnBqk~e=nie`xh50D#B4^hZg_q(@(+{DSbQ<>_5z#p z3yc@+)f0j3tWuxKejw?J+GwU&ZMg2TO!WD^op!9WTjs2EG4|PPuaDHF|5mUhN?OmOZD3v!H z6y8#@|0}vd>D?Jk><3DZJ%Ch_C?fTwskDV{%O8Y5(%Xvi%TsRLe?Hv7EAalYf2!PeiMAV4UspV}v zB7!VVg9`n`p=2IYT{NcdW#D;$$NN00L$F(e`MBrO8`sr0S3*f>ud0WUAmZu-lV@3K z-DS?Udlh6fBU_^B1yfhwrpdA)$r*0wgXV*@S49H8y!&8PMXfUglA!0+)g9)#*Gt^> zdl30uNWUmwPC!Mfk=OiXBb~{T$IQMq!RZ2ioPi=Kuw+u~vtBt6p%5CkmKwwiBK?24 z(xBSaqojkh+vpP_>hyvrjv>^l%1 z7aOG9E@m;z|p3z8UhEG;a$8-C<9QA2@~;G zm@KD^G(g#GICLA>=!MO;&=M$BQL^*ejlHoz^+i-z5%4$Rr&>0iTpxd(kKGub>={k` zQ>i9&_7KaVdoOlTBBn=4FF+k>`3#?e$`E`V!q#CN)?nkzbB7rLkJWtJxbmm=^A|vF zvM5$hnTX0rzwsdH$CpWslHv4X?|Lf&%e}rLHLjukF-eSy-V<>dgPPah+dhL@|J%nV zQ?Tga8t`{_bMMf}N9|4L$tU%U-BOsmf_(gG_<09s0ulnga-Lkjy(e(l0JDUGt`#>- zCY+rkY5*G$Mnr!_w8jZ1F#NQPPa0r9?=-^qP4} z-|rFUz^|xGb5kKd5zL;;ma|4)L$Hp30eD`0sMYd`QUnXX5)|PKLTVb8U8%@1{l>4( zILqCw?3>P#5`-$wnLtPZiy$JH=Fn%wfp+RC_$_Y% zDeng@d2EZ*z(DX|>a|Q9aU==!X2ekFQ(?PFRn_CYt5PU;%yql0ctc1O;X1I@U)?l#BIR!~A8_)><1KnVNImJ3F0dxy|tjDRaK% zx457d1UXa3oB8N4Q~NfGmaj2VGgro6c%jCwWwhYA3-DOux_(E%r$dj&U#>s31(Hzd za&~&0(Lh<_sbA>nDY|qv^UBNF0d7c~z#@6P<3rb?Z3%5088i-8Z>Dj;nP7(PtjfB@`wR6pa7|HBe$G!B-X$?>;Q&#n-^L^4IjF>9-1i^nk(g}!lkeFfg}>LbIJp0`b#-{bD#USv8j(?X&^tKSsz_$XfeZ-osE2Nk-YcX!qg~D~zP~TimOr zSWwMnbeDH>b7EXW7q^Q+XD>Rh6W*3O@D|_kzOJuXeTEmE- z<*?ekSv!tJyr8d3XJbI0W*~79>pW5PB>WZ!KMUb|^jJ@*CPty`S9 
zcX+!J3C2UIFKDA>*c%x?&HJCMV1qBX9`8M%8fh9YWlDBSAA)RO{hr)|a5c{h^V2?kqWwwx@VmacJ=+iRjJgPtDHfZ&hO^DaE}( znEz7DaggJ*{rS}$zAz4(WkA7RE8}!h#&dB2wpCQvZNV_Wp?tR?t@pJa)FT`9dOdOf ziNl4&QtD_7H3fMXNC6t4a2MWA%4V7a8?Pin7~m|t_wkxJNZ@9#9vjg2L0`=`Ii#JD zh1u*dtfL(;M%MB%gnPj47y$rNgYogJ2AK#X3({MMZ~*)Vacu@nq5{915i>l;{pIZO zr{4PH&SYx2BPWo@@0#{>cMHeR*N$qdV04!yqNM6u-m@`7Ma`UVmqlL-fLteP*;Z0f zc?%qfni42n#jLbGrOU?-HvBpK;0m-QbQVVS zvq3HsA5-eaZgy8)J-(kzI_?5UnXW5?l<5sTeFlHH0MfoM^7fkCsQeq!z{Zd_z~Wrr zyLahQez2jf$^CV#Jm~0t0mmE6SA;@D)V-hIsNitN4+Y|e&lKW zksbaL=|DQ?fsXG0yY#NQ-~JYL?zq(Tp~+$9srZ>Jo{H=m!l~z+N#{QKVG}4WX3yjw zoDR8BfM}$WL;AN?!G=NVA{E%rz&J%+p-59bWxiKWuq0IHU9Am^xba-B@zgE^goWemow zsf2`tYB^FqKk~^%UzBp-5y|e|L|K?M*#oE^;x-(-A1Z7%#w6-kHggD8)s$CKwpB09 z1xncad7)r=*k>^P)?0roU{wnO7}Mh2%Sdz{iA9Mee(=tn78>Q4%LqHL_6(b26erKqiQWVYYby8PxI$j;H zCxVtc)0z!m3>LHlE#<(}0S}Rl&@2_Om7zin;B8!-oQh)L>4S)DeLc6|3nRSQo)Fnk zxP2=`j~x~&+l?w0U8*+`YAh}6_u_$o5|~&%<%IK3Kw=Aqa0m4P5-~T97d$YYa7ZE$ z2Kte=z%#h<>u-UPFFE>(Sh_NWIdjFUGZZr9JHO-jwi}d7~pO>OJ?Su6%jd{+cdb z{-%KxVzV8{Gfga~NgjcA)N@&Wrvve68*QM?+ z&_m=^PFCs1`jcXEn3zxC3?o1lkqnlHk+3|b5xi>_sBG;tpyDQ0E;KsOJGeHInerW^ zmme%o&IR+jUPt;q0E;yan!q_>9F233+H!W}#Hxd^Wv-Td!v6Oa71=FFGT0m$l{B-M z%)CYHrxJiyjcHBBjeei~H7y1)r>wjs?p~o53PcTi3>>jKYc`$msUEEQV$2A?*j0^- zoM=VY-(a`U%S|1p&trS5Zy$g?V9!0jo0ykngzqTpCyanHK*Ht<@})j4x1pS1s#<__@zWpgOIpNWGu+jg!tkgKi9#Z zQYW*t98@>}YU3dgD@$6-VJ2}q50O@N2^B{5R&5cNQhDJQx+oX@OA2`A5J)<@E+hV) z1DmiH-n&+0#o@CX@J*uvMPjlm1KkS?3->Aqp5wx)oF5b`K~8@R?63=Fc7C56R-RDS z@@QwFzcEsbu_dGk-n+}n+w~;k-8~G!GNa=m5gUvSAT}$ryUVUW1}Smiw8Z{x-iu%4 zGDf2fiB2F0D*%Ca%W)6Sz7!}5w#W+az6Gy_Hzez$#JEVH(uf;p`Q5w4S+dP=FAss(9^mg zKBDSYMLyqbCpG*|NT^ad%0Hm=CvjSk5`YCCujvlt*aI--`SR#0-QuX>9BET}J44rC z3zsOrDB!P|0@Ng5t!T**mPU$Ac9Y5W`(eOH6@28#!inl|_r9}pnsjdL7 z{(R@n(<{X)@hmT~)y54aFO{p_0mE93hKHm2^^`uHnhMjifZ3r28U>H#rKx`cV$eD) zNHP$Z>AIeE6B@sI7#mIisx%YUqi5m1j9b>|4L~|~_(d{f4bZP267g0%uy<8i3zY0u 
z-%|Kox3|n`>FT>iTHMyPJxZ*5e6#wL&1x652mDD0uECGBGDW9PE&t8(klCxBgrlxU{2gC$Z=aWjboKi2#{;MuQFNsiDnG&ZsjBDAD!idX1|u4A51H&yV^IU8PibAg@%RSNvy zS3sI4cd`(F^ikM+N}=t=8=oJ;JQrnE2{^H?1l(LuT_bG930Q3KW@RXu^)Wos7KezP zwyN$2Wxe!ndbvy)!*&(*>~p>UtCIFk1tFS+Cw#(;Xf&unh@vvP5-CbKL`A7o`9W}@ zjuwvpPfbKDhb_$wO#L!hqUP=0vetcL>mqWJO|d&A9BsrWKACXB2P*&Q{=7Hk#9kNg zA}RLQ#{>_)&dli8w%o0=JPjPc`=%3g@g(%s#eq(O%Rc=&tJGxOOmR<^J4FaeBWXVZ zTT{Zt<3R)B^%y%?O(H1`if>Ml23~#r6Z`_PH+L}jeP0i}7Q4f41fKbc`~IwQ@*czp z)0ZroIFaP&m+8n)O0(U+JY#LPz345r72xcL4L^ZxiAS1sUJxnLl-IUIzH@O#)w~7D zoG%K>Haxuju@M&7Ad)F`2%|jRZ3<5c`NQFc`H!y)u(sciS2zB}Px!ShzDyKnys`rW zmCd7@A=de*u$4;rP|k?lT$*bpQ@RGcLdq>Wb=ScLnEvl`Y*Ah5^i)*rDV6$(>a=|J ztM-@#=?cqSBP`-61px=$W7s0r-JSflLe8Lw!~3$ri^uSD?cN`$lN17@08ab5G`&Kg zv~W9!h%}Jjs``eZb2|0UWXLyVyy*&;*L?~UX1*?gCmVX z-@X>Dxz_E=bG5UoZ1n(l(R5srV)+YNv?O$brC9oS3iEuYF=0?fRGH2YgSHe>u4>IO zXqk|70yqEd8P;sF4;AI*Ri)1WJ-(~HNZ%4QJj<9(lk6mT7D}Y{<3q!9`&IRk4;$Yc zdjsn8`T{5`$h|n|S`A%J{Q)w?SVDfWU+sQH_86*ZztQg6O+)&C-z|JpAZfGVS&}j7 zKE(ba7n3wQMK||^#eRLUA4=(tAHrBOjy_bl-xDedjpr2(jnFz`0M2VoyBYjGfIf1% z&uA6nU~YByZLF;~Kx;>aB2UP+G<-z__<@G^ehD}}zGRJoB`H4wM-6EBY zf;xXk>@0!oZR#{097a!HZjVU?&0GI?PDgbw$;@ zv*DkVWd*z5%~-SB!m`X5`0dCA*H_bBGvT~_`*udpj^zk60=A51R*-;+0}!)U%bz{1?PHjh+e<_Z}+O&$pwjHm95#5 zp^2Awp;*>dsFBy-zeG-+3Q9Vo8@HwUqs$D^ZFyK?I%jB8fA_U50`&npt8F z3nqB7xJFy+hF2``qX@5L-GreVPL6R?9(R0)uy7^8EEKLV0MLy@IHu1lkzGkxm^rZF zjd%5R8xSgHWASQsp*NKoW5G(*4H4E54zoaD0fbK>D_hlt4^7K;kwWG599*)>ZvXX5 zJ!)mh0F9Ben~U>S>XF7H$ZJ43!3z+$nqpzdlVNJSClx zcMqxaup4LM78L)+6;*6Hv4u-W*n6O#<^WmG3zPmq>3a7PS+pdQc~M8vNF09MsMm6L zeU4@IuFq3nNN$*E5dgobV3&D4>}TEwnhuGFC`rwsKoKxKA`EQ+mGXqrWAHfF^x*61 z9k!=z0Gs^;MXq1#Jz$}T1Rp+UDF?@cg9?KTU;4!XG$f;u-9^%&=6tkv)QY`VYEEpI zM4mGLJ>%nS{`4~@S?kbu5nnmxBgnQ?r6RlQ1nC0dg}D@YF3N5T(Z;u#q8iDP49s9N2&ExC9u27s( z3kyOjV=nH9A@f)gqM{c=xZU@g;EE<14vj_&F3nqGZvVK@F~C>T+;VTZ{ddbljLtVE zD|W?ObG|w*j00CqUsSyu+x6v>#%p+c>`>~Wi*B5BbgTL|hNN;>xRPmSA#a!5%B|6+ z&>NwSrsLNHJ#JyQF{r)xP30V%i$Y}Y_HqB!UBqcd^(IkgN98MV| 
z6Qe0bBmTeCSp5*RBvGAU@vZ`M?4Z1(>utsvu3pv2+)@Vm1r35HYQUevYGSSa?>T1+ zhHB;tDFD4eH95)p{a<}iw8_A{ZXu(Vggwc?JnmxUm0uxkF|*`wBpYv4C`=4Pk(p(( z_S>k=5Y%w55YCsc?KCHa#tyFt+K9C!z z{=7(S8eCkKJ=b&{i5A0z+Hw21{+7}cPymV0up*ue`r4U*g5v=ghiq;s1`|}e)eSxf z>S4n-r4I6rL_e4#(p;pQBVrR?VvKM&2X>It-&q(K%4ek7mJW|3vl3>(+(DS4Wk-(a zFdH{<^L`O17bNq8ah$7%Q44@P%nu& z&R%B#%%io-Jp`YHS1J{!-KhI*2He7}0W4T|5eS+?-ZIaeNtZ$V4=PAvl)W*4oF#Qe zPC2m>7$f=E20^tQ+(L|ahoo>}m3Je5-QS^=;xHFzV04LO!odN6U^onYcPw2mFjtaS zb)rc7(XlGTAXmkdyfBhIHsEPqjo9W(&|Synew_@3MCKePjYP8`XI5fmYapc4gK zR1q|iq8xP$`Qz6+yWe5bKMl!;^xb%ouMqL3Gx!2?DP&Kdy%E0t77R*%_|;R>O9qG` zA5zs_0Nw8BsLH-`^246vX^d@Vg2TSp`e_VUTuetfUYHw6T_>8Q7kKe>XUre*=`df4d$ zcD(4>;pPKZtd+0mm0SXBpF^6(LAB2J5SG+|@nbLUTXil&nWr&VpGzIiU7#(B9aL>@ zslIi^lsqS-@LFkh=wI;oYA|rtwxdh{iESwGY@bzS&$s10s6)A1-H6nR)oH(&HEa^^ zV3sqaZQ_8CRFBVxRmN|2x7_plM>Vi&Smdb$*mgP0^%Em?{7=Ng;R?Y-pt@aQ_)qUm z$By2b6E|*p-1P1EytP>85@gZB7M5T?Rfgz`rk|;pE|VF4c#nXWA2S!rn-TO*eiF#U zuK>=kPJZIz)K6(yr0f6ATcjYkN4s2uV}J8n5D}%^?z7j6i_iT}JaH;4=9GxU7#Sqe zNIgy98sHF7t!&<;6_GAG`KC@lEq9r%n*bsDb9>=DxDYXi+nxG2pzbfq3&05-bPR=b zK902fl6^yvY{o-t4%!}yLI#aQc)yUl2mdPV55H`^olP2|o z!omkzbFH}UX`mCKT&Oeb5X(4JNyhZ?0#rrqPXn7^0gxE(%O+dT{{E#o6$`M2Vxrmk zp18;6Qvg$rj3?&hv3&v5HXKZu4X`;djotdn8J|@K0pJ7|aDywv&EpJf?_L92#a!VX zpf}O2G`v=BR9^sEHN@?czR-W)&L|!fkY7ixs3`t<2swgHN5%1HfNlE*uwHorPj@49 z)zWVihZ__|4g+!^CooMZdO%GuI5XSoX7d4XQC`ApfzbQ1cvN2-u%2I9UoT~1`Ww@F zL5VtW^lsVaKP9RHv01yH@jh}3pdDa|>rE8_6`Bh;WO@bkMckUt*xJEG<~P1U!A$sS z^(ujw^E^!!H5gXBud|1~8(fVHpNqxuh5)Rq1ZFLKGBPqX68wk%_RAJ}AYVL^*oBM! 
zd5oYYVq1X$hnf3;IAsFo1XF(Sb=QW5Z$NE0T%~>yK~$?VL>NpSEl%R+Pvz(5Z+d0} zX!|9T*3DL?uVS`r2X}pxz#eGA&$EAH2Ad4{w)!JjWdB1ZWzoiCUj|!)MVuD^vcg;= zN|Ru>(Eb{7@!}ic)?ZyC3Qcr;&yGjG44U&|pj7~Kdb*KypOc@D2H1d6Z$&FpVsm0E zQ?`#9IvAkXv!%`t}vGP4O3G25x(yzj2{%K|YUp*Ma2adTr zq0(09-@_*bTtzBwWg!v3;VIL7%%@!!;N2PBIPlBhYNKVJ+Uu{UopARL!%U{3vU zs}?xf*-?R_0%FjZ?Y8~6a6^Rz%`}1Rh&asW$0h{O>>NOnilAUl0>jQXfPujLD(KZT zX0jRZtim4BQ9L&3_LKedt2+yEfM4r>f{rK~UcKF~A75MnvoKxUt)$RiBs}Me9yc)w zNfJ<5+S-bMcVWx=1?b>u;0t;S&>I$Lsh~l{(>2E`z0$Zf^#6GmD)27q8FTc1{@_3F z0%$-~?Kbh4h={jfX{aD*2XR2!_iJ;H&}|;yKN?gTcIYLDC2X5ydkDX5gPGxSCgL@kj$jPq8GnQpN< zSF7u4(mF6_oqYM`4Dc600RcK?x__%&P74kKupuV_0IRy70xtnye|5lC>pG5G^70vz z^y9|+BN@}m`c)!RV9fXiaN#`yaI>X|Lc75aPx>TX#lQhG=1X?lb*esIw^i-D3enlH z2Y7grjQ?Y`T$xbtzbgSGuYf`ucQd4N!Ji+?<@lNK_Y9dSvw`adO6r2+Q!r^>dTPgT z^Wl_z-|NSRsgA}C6?j8gaRLgtSjmtE$|d%!O3+?XM!p3M?lh3uH|r5s!6GYlH!;H+ zGh?yH*dytGLW?&E{#dPvmODkEH+Ld^iwq;1z%OhZMNFHDj%S-GG?`}NdS$G znDquCg>)h7@Nb`*Z{fCm6QRoAKD|;HeSqU#lF;zxSx(RX6N@@0p_(~T#@yOj8ykk8 z2^D_I72g3k?R`YUyqdjJB-0kqYeTS|CZ!ZZMDqz)y#zVhl3M7ZG-rf z0;p>0i&zUB{;?;6j;KNG0G_fcm^dmbD%v_JBSWrtlpq`#mt4^3x~VE-JrKUuGA_6Y z;?7$F9wfo$2uMrNNG2_kNC=X~3nVtMC=#SJHnXjazh8w^3Yhy19(tXf`glm7s!UqY z`Pf5Sz<|j@sU85$SB=kk!a`yVaxC+9WGryqOa{?rXiWCJs>UEP!4?`Dp>QynBVDYD zt^?^JlXg(rndAeGvPe1dDe@7xGgOR>SAj|>5tL1)+p=g|+~x76$5q=R_r6{Gds_jA zxVI0;X9hvcbWc14Mqq;g9cMm?xGCVVIfYBP*Vy#Hu%epo&6+U;$9(Uz(D8v66jH{H zYasROb->uTIh>t6o+G-~`~7LX82A{@RqzfAK2^Ew=Fk6Rcr#_Z{N}RzaF<_=emi4JgCPMCIn@3II=%R!5Q? 
zAC*zJnL=MLkMg^(Jhi}evT^}L)*>*QO5(Z2;z3f6_wHul8WBhpRKcBs`|EGp>aP2H zB%YFc2@s2jgj<4P3XC3|x%T#Cve3}>>t}E~jfJXu#Wax~)5!SC!HduC9 z9?$?3vA%%;m26kXEiM={^R?~vlNy0h1f%2_!=CW3U%%dfl-C5+`PgVDZnKpwz>*D7 z#X3VT!I3&3+H%ld)lW!BfK_Y8#sbknrzEHae~kKA*ZxgtfOM?0cmy==&EYEoCXK29 z@G4k*532`Eo{gix{OP_T^didS8cj;&Vu7`W839_91&Nl^&W(EtIw~&Pw-*X8w0ysqp8laYUtN=Y>>&?GJ&frqt<|ax9}7AlV6L6M|5HS25L=9UfzjQQCr`#V^AZyiGj5R<6xzY6aS0H>(UW0pQG;x~8P{BhJ3q|k zD-d~zZhdMVyUlj-bU0B}n}( z9Jh`JzKkEw3i{qC{9r1KLx~X zW3sS;*mbZLH)H0Kk0@ZClsoA|drP{yBF!>ELYwLUBrL_~v%1-k0WB^`MS>SR7Fn_N zfT-%Wrm3}V=_>YfV_f0vcr5T^4kdm_49ZDX|5mH&_VV8u0rij>rf~ z>Yp7*X03xwaBI|=i@#6*;Q|!)#}?~!gMJ%HFGvyqNeaX=;|se> z^Kz$QmAim?DM2B$KK8Z_4F1qw;5`yA4=W8NadCmtX+H4WpO~EdJv@{D+j3v>SWR+J zA2BxIhE*?=n1Uh=P{QAU-lP~z>A_3Tt~oUx&AEC1M1TM*<2I7`mk-scN7)$yv!XY1 zLrXY)c)luFz%dsGV+PcC{*}a+$xo`O$&^tLOhm*L#3d{kQSs zoKAHRafHl_gUX1?UMG8$J&rxgOh$GJNn~V`8L7-Nl9@dllocYgl+3J#_}?FWQ@`K; z`8}8Gxvpn?&pDsZ`+cw1{kmWGe7=me`y<6jjpiXyuLTntrWkAL8&^I#P2YhA-*_jQ zQgsxP0S`wJm|QRg*<;reXBa|pDzV;f8$%-S61ky*Tqa>%=KCk~E1@1lzl24C=OO7W z1?oLw-@vC^BxZ7Xo4{kBeq28v$M)fc@vUIxM>$zp z>_civQt0aj!Hr5D5A`A1z1-L(GZa)w@_~awmQCm(kS*XfetLHMB7;ekOU}0+Kl0yE zR58Q4=Z7-3&*Q_gr*zb>>FUOZl-vtIrG*ocn}C&5*6&Hoas?FXRv}t&0F6hztwj8);aEAVDcaZj#W)n0tS04PG6ZGaxMrshU1w zA*;^f`lfXC%%M|)d_bv)1^p!Zk;;<30(0}tQd^E{7I^z(Z0);<_4aXmR9ku>q9NhR+zmxwc5gQsA75 z!SJioNTt@mWAp8#3_0&$BHBZTa>$fL4M4AA4|#dp*dPlofYKpYRnDSCmdR=XT%u6U zuvjpItTn(}3HCt6)VzD%De-J^A6^)A05i=7qI4+1oWPk}sF5bXz4t?v^zk09AT49TqHAF~|~G6f2sP;fO|KVHhItd-}^ z5Vs}M@FOFU>Ea%-2*V0!$Y2sGawy1f8{Y)QJ!ji3WTWx2*Z_RUlKC`E1UPKUFTlF6 z=jWehyzK1q{tU02bUP4~R3UHS7m&C-cl4M}PI37Z726HJjrq8>F9Y@y!DMv&cA`Q| zx4W`tAp|EGK*>k{^wOA_*aNjBb-9AiPG>t#rT+w=#DylXvWoHEy?dOzP8q?$@o#S) z;}!ULyl3Pfi8lqL85yVO=1Bgg8Q$c?mGl6Vf2*{6!_MdCw)xGK?=q@_L%yuC$vWk< z{pnhU>?vdpgI~#svTI4CAl%mFlx%J*5A9g)W+bu)g&`wKQ*QIs=_UMOaanS$P;MZu zeTB`P$4>pml=q=34Gz<7z>;I3g92_MlU`7w$H0mEH46+XAs!ZS4)k}IIsQ5aJSv-C za;9}UL&^`$NE?SKze+eoQ*_Fv&z6eH#kon#2@qdFSB_I<`bM>)pg*}&9~Hb^Wi z&Or)MB=(qz=I@=Wj3kl`zBF3L_af;;l2Czx2FXK&r^ 
z(8DAh5#D5(#4)s}Y`#nHQtI_OMZ4k)GI-2Kz7n#%DM%aBWUiU5oR)RDkxJ#U+~@0bEQb<> z|BZ1>3>8g^7XJQ)S1HGp_o30B>ZxliO>(=)wP>6bBFJ0aigOkF?KjE$4JOuG2H6B; z?OJ|L-72I!G_ll}41hfMos+hSCV_#ot1y5k&yXcl(%w0RI1z+`#87GTSsQ8OUg*v! zs$%5P&1How>d;5$h=YcJ{?#JQ*2OzK>+yeUnc7&Q<0s@ouj4Cx*Br#Id@Bac7&*Bw zOt}UQkdF&b;o*k4qdK6Etk?ZG54b=^}G4#BlWO;l?WO z4mF<1e0?_aCC^q=cS}o)-Hm=~ZViWy({2V$ofttH%ihCZ?zK&gw1a>>neg=ELLlkf zjixY(Iz<2M>uKAEkWQyzOrjzaVBunJGqafgGV4bevq{|izTy}ofVsWn$YGZ4&G5>L zH-G(Rz&jHBl~Xr##(&7M-Ep7kQt7*Ms?_ScTb0X15HFz^&w94)q9P8K=+Twi-EtL& zc9~iF?T+P8*;9ntCVEUG1v^VxY!~DEaR20e?QFCSikDmJyE7!XllaYaiK%`;l3d^1 zilM^tE7r<7wJZu!tkSEq)HZb<7CCWm#hW#r-K5&4Ggdp>Mnz?1Od4)n+x0C@UFlOg z4f}pRy!(0mFddx)^)s%g2D7#XBZfw6cH?4Oyg;7j9^Ksqa08G{!Oc>=HYlL)HBt2br=PCP< zBuuKJRxPyp)_T~oybNaG*#Lxa{3@!_jNN0OTTj#S zDDcCJRL|Mo`#BRL#TwWG!AwxleueO_d!|4U;(@!!cEDLqbf4A)Jz3xPm6?KX6<&+) zl|r~B$Zorc$tpMVxN$IOy{c_{brWMr-R@426mpLWb>Lj<5ys0?DUnTBaiF(VKCT%_;9dKO+mFVT30k2at&^T+qJZU&`lvnCM35pb5q_{ zLD6!Jr`5oYyS!LIYeg7dxkVdc2_duqm+YHZM$L>v~e1bx>Pq= z#5iI!PTH5+#PZr}|1vSj>PhhT8>wr!2y->FE+bX$4>?*&fTBI?DKCTMl*$b>=rnf` zptipF5ikV`6aBL0h^?)&gRPmZyn6GuG6;A}89)?f#qT1W0i3w-vb&WpA$vLxtbvpy zO1-R@6+uCUTLY~7*=6s?&|$xGXdSBoMe6EY#ZRp7r|he!*hT<_(4kYwlmIea)zji5x92+2Zdo}d(PV=sB-PEnj%?(AGH7N9)JI6_fOSbfPpcv zyLP0NOyZZ7DNDW9>&1>x@gF!t&iM62(=vt)E6XXYlRkVXyYVMwVx&XQHwmAEUWF=b`$JJZ>G_P>63TT>7*tPJaNl>ycd+D&Y#j#o!x`xa(Cmvb$ADwI1DTbHX0=^t$L8nW4sER+i^!V+|BW)2eMAxnz1H}UAod4G$^D5xw ziWlGaFF6ELWCFVf#$28`(jncCMwot!#t z5e%qwkxOPYhe|A$54iTEGP+r=&yE^nbdKTi6SHqjup)I0Qv*u9xQI-mx@$?1Gp{u$>{W_^#2*wZ6QHv#KbGF_!)alvv_ z6GJXyO?$(-N9wn4j?}puE3};6epQ7@V&A&R6ot9dRv-B~`7q7>De`GS34yGBN;D*! 
z{Po*gHLSO%fHU#@^=vS@+dnSg2FYRDsUi0n8(1MZds0d6>L5rpIGlIbIaRC&OEbc)ggMPo(x~$M&DFJ@v~CC9SSu~8Hwzm zexd0=hyTd0+@OZU=bkX@ihn;$dE6e;FhLKL*D9WwSE<|(Av~_s0^sx&#MHyUb;d?j z4P@-&0rtUDE~KXik`m8N=+3CT^PI;yCh}@OvC}%I(TF4~8j!5ai!Cw<{Y_S) z10qRa_E)jE2m~eu9+~p|2i~P-^$SaLi(W>iL=F~!znm(Ip0-tP@CA2obV*%|{clkT zzTcad7?e-~(~Espre`v*_T=|-s>WN`33W3fbxZWYpReEK*9n?0h$%e*RH_6i@teKjR3p8%|T=fyUkBcLmVzteWh~) zz^*JL0=4`bfmn%Cu?YVfghdR%Xd!9T>H$d4er3S;t86ST_PE}09{}Q=R70Afr0`}! zx~`qYmd(?Xt>L9W0_#i@D}#hysnoVx-318@cag97m!%C)pNbWQLeK)*d?{z|oKygv zw}L7rEyJf4jX|+E6>MtkL@}Hti;@MvRXuml>^UQtpU7vdipK{$S$rL+@X&i0yuB#W zCh3H4_~%cUGr&bzH%Y;64Z83-z)jaKG7?&T5hKUY_xJ?V-sQ*WDN^CNEz!JF#mo94!U$M+?LmCTsOZhV}V0q z3~hxE&`Z^R_#3!8jH05VVi11@^QF%&K%YU6=l##0Kh>ae#@s?ak_mYsHviW)qMdvisYyJaFS;2C09;C{;q2fo%pb5`ey zzyF<@H}43(eyHJMK_ybz?c>X1%;J}$8yfIM zW|6m^?1Vfv%8KJ2;FMu~qx74RM*xAJ=k)uTM#^-IL{bQTtVU$Sk^l3=9I*m=1(4M6 zfrvspfbqH4^W?*@Jr6aGfTqdEC+@nqC1sRrfp!wi003*YfMyQfr>Yxrli+_i#N^`! zC$sw}8Q%aCL(}hn{dAl9;ZX%W7n`m$L%EsgvD>eP(NH87R56|~t;W&un?!@Kiwfij zLP^$eON*ququ`=8oEe867=qk1egQ+g5Aqd8_Zi-zcIxc^adcaj0eA1_&L<%shv#4~p?jDR4H?!Wjj__K%XE`Z720a+EKK?Kb; z_P3AClb~Oz3P}>^`_QN?URi+}4XtOd6-k@bkI4)k)Vzx)w(qYl&p8_tG!g$^tb9(0 zM3RbF@^X{EMRQ1GPwpE>#7KZyKz&49l1i#B46RIr{sf`||2PKR5*>;%&FG08Tkcsz zQnE8$qN3Vp>3OunmrQ|<58g9f%wn#S!QhQrsnqG2n0TQzZP0FAcjf!1%jH{n4OGMN z4d8-ofDah7O6pQEW!Vd^9|!wT&K6kox(79L1wP~iw8B1Y`PW}>VPT0v>;jn`mCH>P za^xahZtB;rbg{Gt!sEB^-wnN2nT$+OfYBr1*bo2~;GY%00tr47a$HC^^NE&0!<&); zF7XWvn0-pQ<-8xLlj4Wo09;bX}n$mb$}T3zZP%-H_t+66%1Lw6HwCC zpIsx73u#m=sp#4yq?$~ZSWo*~O`52W11@k9SjIFA=N1$((ZDwH`d(5gx5&1X3#uhq zy*Vn)0)C5kRG|iCQM35)JfZZ;cQ=m;q>qdkB)}hucO`uY0=H%;Ab{yW()Ufb^4o>K zk+N7g5H{DT!5BI^>j{?^M?N)&7e}f;iDbTm2i&74K@fcH=gDqza1)bJB}-d}oTVVA zxl$4H80q#SO-|r~aJq{a*=2|w!YUsC3#7h&^7cQ#Jd1k6w6i2|RgeauTt0ZzW^O*J znhp5|V$($uk+_)@?%WDco2a)OJmGXW2_KBn0p(LzYvKs+N zBY+v=)01Xi_lUpuVMzoX{qRy}&wWHG04Y*Np7|MEO%vC9p1@b2PRB1i?@tP@TH}#< zJwHsZBtW_X>=IiWILefg}zTvexFM@k}1N5r{mD0tkvg!q?r|z>et@v zhYbu3#qb@!jIa&BQpFj8MPUr%+tDzVsfOq+AVT^zrWa$3DT5Y!zV=fm!{y~= 
ztt5$FPVcd_(staqGo0?I)E-IczYexCrs2aNfnMUm%ROCfu%=vtF8S!bAkzCzILf33 zu!NjTV|8w8vwBsoQz3xnKZo_zd&QVx$H`t4(t5t#$R$O3Q6aQa`0&hhXdh3?9r64k?oaB-ZirL0S>G4&J)Q+^Zi=r-K zj|f%TU5ZLCfXscbiV;q{2qok$<~w$!w0)h^!c8OuCbNJ7!H#IvRKM%}@hwzi_{nAA z;f=oW4i(7Vfycwh=Jk)8d#^$?QIe|~IY|W0yYB{<+j2QFK>2AFsv*ZVJU1(1=n6se}4Jg-`GmbCGx@lz)xeWPQEtSMz6)+zr1nPv}=tpasu#olWc z`2sS_Uw^v`z@ zk+ftszyx64_v&#?xdcm{1cp{=XjwF;MJ_8j6-cojs)rYdpwxyno<$)O&3qpg7a#L0 z(WVsle{N{>r6Rs!TXGI42e9jABr;bW`5k!FGB|C)I`ns+zn*4^9s0D2^76_yHaVmU zlq@kdR8+vLLqs}A5SvBnwkoGv|1(CEE{$X|8(!zdut>T)5~jEnlt@8K}|7=DKBcr6LfPBDM>s9HH}<}??7aD z{d@&UYH;Jn!46CnVqu%i?$e!MB|2mGD}?-y1JbA4vV3`%WN9M_Nh=^nt+hBbm;I0m z|0e6q^?iAMo-ndL>$JJ`b5ZCaN7EXV?+f;(7p^Hcc%q~IJV1958E!Xx=38E9Qq|Kq zp6>NN2KIIgMv?3XTs7JSZYUR%@xnh(Dj7wTW@l+>$$*joc|P(iMFO+iLP<YPX@^JvQC&A1Af!sW)2)AK=1Td)K|aR_unVps5CghqZwUP z6#Iuh?2UoQYx^>p4K`W5HaPv!c{*>>0R_H04jK;15Qybo!&0myq_g)(`p^d0+fqDQ z6ITc?x)f1D?otGv(4F6a$T{fu6@w8|0olT$P=lbsx;C}@Exgigf6>l97yDLgWs?kv~AOSDAh4zxn`!tnrwMyMJbR`v{D=vRm8gR z1cKV0j%LR(2q*7-W|%#yH-H9_z5ocWPLjm`^Zklo1MZy%$6spQPJys#Y+fd7LHjPd zYSrx0a6H8+k*-KnipW}5&~IcG*X@=I)_Zs^Ro}gx=9qR1|6x&etP3OYmHjgy!kTq` z|5_t;a64p+D)6NrZqS)UL$Odm2NFi*Ca+=F*7tuW37NQ6VRKH zfo*KWcbGJvZYW2&HRLdq8t zwT7Xg(2-{kH8K+)oJ&=GU+W`ulY>RF1ThV~EJ6`@9b9$oug6iAM-lAUZQml;C`{x8 znfMN}imuNM>1e}zZ|Jj$tU2{@sKT*SR9Q3AAne}vIdsGoD8pYt-cVIjGdf2#LA6<8 z=d0z$A$KON0`Cm>Z2u`hW2WTcOSxz^p+US&7)xl`B_j zEsxG|ka+JU4%TiyQwy%?W?2cerLmNhcPTbNe7lRMtx+Aa0B=OC_r27$x!w5|==kBs zfmC%8#yY|v^|1}FudjasELXY|q^1@X>ZYrp4Cr+Ql8oDA3klLnCzKEKl{H*9N+)Xh z1>?Hg_m6wTB$;KGy_t>cUXJ-+X%)l@LjBcer|n+jnXO$afy4wpsfhCBNMyHaLZfJEYo4f2E)cXub^FN*e znLoYmw-$jFav}e3;W=X6`*jdk3k_@Zg)a@`pnQ!3E|3}w#ynB=fQre~`}D#s^rR9| z6wfrhl>5hsD_h@ZW?O;;Vx{~%p)QZ=gXHGw{_%$@4I${Q%@vI*-?jXYiWh+#e(T47 z^ajUU_Mv|w0}@ZN+Zl3*+t&rRFXeGz@Wh{A06--Q1w5vBs7nj(orjzz7FL2H zkpdZWTl>-<2VPdH(d-*7RvB1&ANft5D8syF;W47|m$F@NZp&;DpwWFF84qZfLf|*} z98ReE*xA`vCwk#G>ILwc*GW<#nK2Ac0?y9p%kmFZa#F+MM+7}0vj`bh_IHMr=}r!S z=93@{obWxULR1UaC=D)MJJj0s-y@z0wGUCy@A91EzW`*)i#95 
zT87m;l+(&Z`b7*-&szJ6kz$v8kwABqRO~NbNdk+1uUOdve(OOOZLp@CQ*c>{>pev% z9+q>KngrzpKp-{}nS%pX;N4@yn$sx$Q93j4Ga^u02HvDbn0(gl5n4BwAcy||n=;<+ zR|oYG>jlM(({Kx&5r;dk(?Idp;WqRlaX*RA_<@2I>+923*7Q0jX1L+9J=9!TRsK#Q zLRsmz@kqZfQtTQ4Vs|(Yp2l3Ho^AnCLUx8AFx$=FK46C=-l34 z5y;t0_eXao`QyoTEOZ*$q;U$G|@NL5l!f9_q3 z&IC~!Dt!MeLMY_Y$l#^sI;&YR0I@CyK*O(Zl#W#88A$Syc=Hp-)lg;i!6+{sCK?$X zAdQ5}>-B!{6t)=;y3#lxbI1Fx&8mRb%6*V9c{%#2zszy5 ztNV_G!POKYH5hB++<%ZI9>SaIZ5FB3H1!P0s%H}}tGID_BE39#<#|<%*S>$CJD2qa z(nMs41x!6W599q5p4uDH!eJ34pF{sGza;62Lj_^vAs*DupF@WEq4}UpJgk{eH9B%+ zStV6J1J)98zoQycK(QziO=Jx!LOBatg+L+59>tminp=I%zAqM{1Ab!U6%+uAKZh1$ z0wK-wNDlN-O5x3Y0N<7y<9S}|c!SM&chq5urkhzB5 zzJ1$ce^(4rb27t2n~9vCgJkA+i5oDAVQn7;0sj7>N+^oYoS&8S`Kk<<`-gJH$m#wN z^7)Y9sd9lp^N-z0K9-hlbAy5qG(z<)V3e#%f0X>dft*iv6IC;>Sv1B0Fen9@hM3P} z@U5>14;CV7-gsz`;Gt=seaN?C;oCD#Ga}H+p8^}kBEAYYy{WWnC4~?SO+BQYag!`! z92sbc@bdzT3?dJjO1)Hf455Aj^6OY9E{rXvCO&i{u$j3(SBvl&LjFmQSzYq1kT1+t zjK3Sdz2Kr&pU_)g+chI)R*Jv^gU8RIYjhsAHBta%e-<=7v#S_;1tX$nK*j-KXz=`| z;C^3jKhpa4t$}+V4I~VJie*3I-}zQiMr7#b&6_ty$|Ox3ZX&XF@~-z`mU@t6e>>>Z zj&nk);@c@-lDcb3>$m_H2VQ=ym?zh^w`LvWX|&_Ux-xV{9V94|h5f4m#7h9?C+XJ# zL~7SPk^PA{&>KDR4T7-&*Z@qu-&=k}vQ>TY88-}D*?@OqgpX#?fEd)SK_vkKC%?_s z!T|KAV#=1kEl-ug=)(Sd?R%m`Waz=Qm&NUsV30*VYS(_P8NAZje-mISW}(|0Om&Bt z=UrJ`;@SU0XFWulK2y9&uVVDMfEb84a$3@IAnE<*u3RKx_2Q6KJqQEI#ud)}0I-Q& zg&h@7&o>3WyNL${Q~fDGN^V6QkuhslNwGAd^@{vbXa4A{j&yL&zBh zEmoPbM?q(^-s9&}Uejt_+U*|&Cct%NMif30fp-ohJVQvj7P!M&+Ybi1et=Z$+iSKv zl0L|cr8_{&%VIN2$_=VWfom@;M!OqC)u`|bZJwZ|wzP|AYjYM(eGb+m-nIj}b)1OR#w=LezgPcCN5 z1s(Lm%*;jX)=;zOK*)PJ`t7C7_cJ_{p*&3u^LyXYVIr_ z+z_2spv`D&9Zt(fz_pb<2z%ywDzI|l#0+4=67T`3+@Uvbiy$1=&y$fN+6zz|{VzUb z*B=PPD4|;E_5#IzgWuJCan8yzn*d}#+gPAA=)lXzJ-dtJ{>U~ zk()bxl=UbE6Lf1xNait(_ErZtt7-D>p&Q<9Gg8+0p9>3BFCiB)5hR$82c{ zWvf`2J3p)#xl#snq~j?x`bF~NMCd~`S{0#~Q2a@%=<(#U&+?vz24H^w;c&!OZ=|-q zO#$H_W>uOeT|ph&bHufs)5cHNo~nZcmFd5J2Ubj4KLtl)4C;SE_B}1mRUc@1Aoa2J zlv#Z(=bp__9fGmBLzEZ^zWOO7CWFAt(8;!9!Kp`_E6 zzS=Z2#8jc2tc_Y5*Z+HXcm>l~VqW_CAG17&TnM0-J}ebq+88fA*+qTTgXwMTpaut4*lenwok5Bj3 
zp?Piuh2>lbqjNE^cAu9DR#h7nQ3b}sHwtprm`2jV;qUx)oI^F1#zf?3NUg;q2<)xV zlGu!=?V`D3?ECW*!i5h?E(I`q^l6cdO6y7ckWkjd{PYCK zXdU2#;V|wDJL|RxS2d+7E$&P81?SnLkE3y-4^%Y@(s%Dy3|ldb)WZS(bOK6=Fifg6 z3$4Pww-N>+$K#&XHoht}DyLB`c%(x^{??&R2XOteYrnpjKQ~IZW4DZA0N65;lS=DvnQcTPtgDyh~vQXf=bEOP+6V1 zYb(u`>Si0{CBsr1BlieVDg$F-BWcjXdj}o!IT+_&`4$Ncud?uIFrO9Mz+1>w#O)y_ ziRyeE?yzf{cv(sq)PE?o+(NBbnhZO9U%+Of&*A4z#^uBRAExsS$HG2T!b*c~B8`q9 zxQ>J7WHiTZZZf$kU*XRw3&nGhY=VaHI>_1$E9pEOzNz!zu&WZ)5vZ}$>6ki;AF(68 zr&AUmWL>(NGtBnK6H)r0pk^rs?$8iG(sO+}8s#8FP{pYay&Mj6FFj2(_6EL#5LtYC zDb#UI&%pGuYd=FeQIIeZu_cLPK-ig&HxbgOxK?L-jK3 zIw*zNXjVR+wY_vE7PjdgiF&!3jn82jjImy?2XaWP#$cB(^=rke{y1&?aq47%!b^$| z80Jwi02(l@!gHubFk$gPxfD0a91pmFgZxE1n(lFN=`lfXo65KI9%huX$%W*czwbPj zb-n!3+pfXjNlhvw9@`)en%+`TAdcb*%gDX;9Y#I@J1Dkb zt+!QyH32i;0|a=iamCeVrdNl~vlx_vJJG+VNX{r=5peB9iJRnfw463c_;AV|w6fU~ zpep!HhH=jU)V&_j2gsrJTpN?cV>KiOOM~rKjqT45+gG< zY;(}#E<0+OwZWapU<-z_S`opJR0*#~km`>XFYykS((Qd1yJh>7$k+Sm5TQ)q2~x z)VZg|m&$>0)CU-}HPG=i=h`A<`+kBU#|A;b%lV`zB)c2%NYSC>BCv=B>YsG~rN;gG zK;WuKll9h}Ydg9cJ?`buH*};%&F+X9_+xiFcz2iwg65 zctg6C^T!G#HK80)#a-#*=SADNOU{h>Y?vCaw9*L!ckU+y;3C`Z446)o(K!4d$1=?C zQtn5%3wJocK-_CrcRe;S)i>(E=!BF(7Z^HoGpL3fsk*ffMtVoj>mb*H0Zkbk`uO?2 z#&aU0)3kOi1CB6_$?P{*q7TdH_UNy|eErktKoCo~uV|G5SehS`a??>jYGwjvUNdH^ zuoCjGZH%(OZHa>XzZ+`qX<19k*p0XKBh;9L$8|sQi_D%4XYLtc1Oi$)h#Jb5HRdNE z)?-5rwlsv+e+bF$Z&OxgMzQg%R^we#1s91 z7XL-)K4dgp4`!*9r8}cGBVZ5GUGFbGx{Aubvy)7lj=AOCOn5 zciiCruz(Q~rNWFG`FBfgRE~>Uw;kYEXEFRh6;`Y-Z%2BgenR*a&wj&4-@6t^s(K-K zF0fDeZ;V~~@FKze#RWrAt|sQ{?ks`}`iuzBKsT_Nr zQ+!{aUfmBoBMv|GzQs@NEWGK>O<)Oc4kCiUJS*liyw2%+PNj{E|l;fEa=nz z@_()_9&JSu%^JYl*RqvuF)v?wm^K$Gh%VhG;utuf4ti9j0yHy*fW~5dP-e5ZK6Wne zOmoPVf=F z1P!%}16-J}3F#&yS~4%;)Ismq*U31MkQB(mAZlZoPDP$Q^85Pk_{VK5YgI$(eYK0w zg7us)f9w0QZ9j|mYY37vAnn^ft4{8x78G}9(vBLJ{bkj8O!Y)GlC}S;cqh(7<&^~tn2^B6&$iYiW_-p?;c*v#EYKN^n zsk$Q%n*yY$_p@4gtOo}AFVe`Y`-Epg z7l-?zB(}V3W{zwZqxO>GXwm(`k$6l@pi9REtV+m9ozO3$Fof<1;pl?&gN<8AQqqHz z_Loib_w6PnHX?zP-F!A2xs|A5#WlTC2oQky 
z)fvJU)!lfjB0D}`Vm%)zXe}u>58KD)JNI-+uAJ?5Fa~w~7r-cqeWLq{GH{xcl9#cQ zzZL-XceVC$wgtdq_jte<*`b@Sd(7(|Gi)vd^^T*xGq3k&pBV>IcL|VekbHh*cFls6 zwiJiePMp1-^{^Yc{A5V!(G%yeV+!zJgyN&98uFK+_C1AFh{k1u7kjRnZoTTkMiqK6 z5Q@)p*O7RxObhxAoAsB|rC%pNS2s`e(f*)A%k|cc@1dW9)Lx~!y#%#(YIV^wg3M*| zI!B=V1%?2YiMr&iuI+B_Z^DL;O$Mhh43XU)aqVQ0d-hXYCToXZ%g~9^1rM2hE`Sh$ z^KJ6I2<%HFz;&xfDlpy&JC%yVJuQcpY8k|n_fg`rM`b9}b+O_2TC|c;cZ1l)ISrCX)~ilcWID@H%k@)C-o({+)6X@o8&Y(j2$fNmBVqwhfT z$q5R4mV#x_wA$EiFMzy^t}{*M;Ku%CNuXHSOZjCao==+fFDD9DDzpGJeX{%D<( zxjqZR%Ns2xD^22T6jSww&X~*`0$0z6q*vj!k9c-toWM?cRpT{ZiZ~w@pJ}Quuu&L> zNnSv%dT%h2q&Ga@E+5O9x(WOMQ#;%c^<&3@!cN*Vh-9H>do-JVo|S?G;A;sb(t*z6 zZQswUB~_1mgL@o)T))MzG0Y*%a`oGUeyz8w49MA7KTP*bkx@g|`!)(Lw0OvZ{f!`r zzsTn=!@KrJat49X_zB}7DzPMmOfaaYU!kp)ogDm0R`28u5{s(!9<>ugKr^<7U`}=SD8_{gE1iMqv!NF1u@gXENfp1$Y+?ez7-QJCl!SKa?d@e@i0fS9trLll@g?>(mD@N&Y=1c*iv#9^t_2n>2T zB3lQMwA=Nuiu@e&@ zTJ26aeQUt-ZHAJiO!LaXb;}1sz8iWuIR7sKA%!|}Whu}V1V&ya@|!xq4z|_gNWue< z(LU-+qaO(-wXViW5WiaPQ+5u5G`!{Mb`Wh_)W5x~%zB;715_>ME2nN(_-!p8gt&fL zhFug0t39y8-TaCm|Arjs2Nk+ZH0gu5Vu4P@BM-6fHh@yNqqMa25dS_t4)!bQ?fD~T z*F(fovo0{wYJpEJ2H)T}QUv(qL&JwSpOt<|DrIpLL3|7H$8|7n#!ef!K-Pd1l!BAw zt0B@_heW`e;rJc zg&jPuU_^PaF=k6i6X&A|@C}mN>lgWY6o5>7;iS&AwDu|Z!Yvtah}Dd{zDk}o1oVz1 zU>!8$z~ha=zNxmzGVp%y`1Crso@XJr1{2z)tnGfvtGy=n03dk01J9SppcF(Ydm;jo zL!?H&@BX=2Oo^2xh{NjCGJ}T1{OJOtq7?3%jYTQbXPy%>-2L--AW+L+hqJd^z3%Y$ z?5R*Pn@iULIdgvsRs%HP3=my=$6vc7%P8t%k>cnfpP%mlKCLAXKWIbv1*@aa?h9{+ zQvV%NTRnQapZXou#!!iMyQ)is-1c-{137*mqaAHXCT7nK17CO>!=c-2pR#ZE>&<8 zi6(eAs)nBBB=c+KiyLz#FbP#jsI+X}mn!XF4Jj*kZ)C!Fvb{~!7BTiZIsM6&y|%)s zCyhe;2MY{Nc-@x)Oa@5>JpqdJEi|_hwpz~L!Q}Fk!8aUVuO8<8&sVA_6Y<~*V~SN9 zt-=&6V3We8(7nBHETK{B3GntHXuUZBpa7LY4Pd#7#IyeF(P*?)^KlTE8#>i*JJ1tz zIEtW#ZNHs-^$z{bSRzYVVN2Kv;W_H@b_ljJOJ&$9k~Xo^M%ZTfJRn0hqi&$B5vzy4 z3s@!(I^^B#{NeTz^HM{mM&@;6bG27P%8Yh^i{8VQjJZwUSn7ra>BROtZEy z`i?JxfSw$9Dihv5*h1B^*pyA;^}J@G`o+G3ENQslz60R+Ut(E9z5pES1S?RaA5l&v z;7;u?^)57coFVmXTu#64iWp!7iIBmO#d`=H(O(MG}Lg`_=d!%5Ay=MU(s*k1tO($docT 
zBt?|>1&3fzE*=l$pYB;^XIW~y9ewDL@zb_g5yVzNx7cHRh_I9t5 z)kDjeL!{vfXvLN`fCg&rTSHDDWk%;bK){y{wv@)~wE-ThiHkgrl{RN5rNcuo2pNoO z@nwctu1u?~31`2F8+1mmjLHigllZ0AbLHcRT(5z&lRZp9m-9TToYX+M4L$+=%UJH4 zVdTAMK2e;{in>sruMfYZN&~EKVI$CCw>L+*AH}=bNg6(`+>=CT6kK1eY=u6l0Utrq zswn6~4+2{Ppx}B28?H=)q_u-|fgqLnjcf1JE9e7CJ>p1=LYh`b06i)%>k@y_e#h)# zrn3U4sN%XJJ5&3AbzKH}Ux6NJH))GJx-Fc)x`UF*$5UA{%E=(#3Vsz8QYlz5XV321 zA`??!1Zh-SdSw(r7;8%&-VPFMzE_?mDMl1ir-)PlU$3)tdA#~eCN?mh70+c&(H05J zfx!o_pp0n=dkX8RQen8MW(=lXo~H<3bbwqVApx{|IV*8i9KB?4-(VM(OV)C{j{5S< z8OUNb?`Sq5mqCv@7)Ft?DolCyGr3E z4*p(Ta-$GMl9Sct3EB2+1RsL@M>ZguB0|Qd3h7n)n^?+CApzUixo${-C&3PjcQL88 zi+PnPGv-{othgvKxq-ds+7J+^pWmm&a#LqQD0}D{0{GxBBZ#H*Snh@0n{!3c&gR5-1|6yh<}Z(u%88(G;l(;3%J9dS(Gn%^^)pAW)eG!(yFHWhh2 zyT9QxNp#i^;Dasil_{yDq&lTzWi7D|iLJG0>FWdgRcv>n{`g}IT1}~?9j--H!j#Zk zlc@o><8N@qkELs%^d99i`|a`mM>Z0SvC(tMIaWzH;xios0;^)(nxm`9)`colZ3qDn zRQR-ljS7&ECLe<@zPt(a#fuIOoQ4TpS$5zh>3Fmape?OxU`HUqB-L>6(YDDT*;~fSk$-_2Xs{#rqbn+}clV#2ra+coH-cQP!;$Wk^GT+@c>rDU zEzppZFqOC!0!(ehPH!mM#$2v`^uKtIyiSPVaI4O6=ac;Ra-*N`)JrcC`OAShHw22_ zPCyE9ZKpEHe^>4}V`UAsZXu)-uJuv>#|J@=4-X#g^MZEr=2p z=K(dUfZU}eQjwD3HgxQ|H6uGh2MIZLT0BNM$|H~NQSV|Z$W#6QSQL0tECG;d;Dz@9 zygTt2D4*6NM`0`Z^Wln1i~5n9g*&FUdr8<`Ea3GXe5t&+`{qIg0=>zipm!PSF*o?; zc_N?VxemqA$`X@Yj*;#B6ijHt7Re@{(_2by<%0%-ND$eHBM6bAhUClU?q?6Fvc3fn|@Sl}HLhqG4JluQSuR$tH6=5&Dx@4Og$c;T}b zc7Kwx9cr19DV&Dq0t^S10E99G3@01hO@#7lCB(Iwp7lH&KZ(WjBEM@MD#8*Z35BZA z21#IljcxzRtapRix7OQ8WB4FxCK>1EOP(MOE2R9{?127OJuSa>3x&+4%WV}uX!;St zu$;U=ob>F|Qld{i0Tk#ZgPnK+KdFNkuGJmR2OGPIsr)UxB)T2cVh|F>e3ZmOF=TSa zP`~@TvP3Bh5fYGW!WUI38%OyCp1J}iD2Bbp=1WG$7M?P{s{onW8k1m?r%reH%xVXb zQ5=oLR$D88GomSJ>EBT@4%tkU!xa5aaw^N)(t6LauUK|3nGsCD|60UEV`Bc8ezYMR z<|(zai@Qzp&_jVI>b^CBdd~&moIinFyqZ%}kt}=xqyUYf@G*5k#2k=@BK@z*_dB4s zA$x#wXh7%}_hK7)ybaS&-xsuWOch(pu#Lv=yF@!sG5kk3v}iNCl^cy>_Jlk0(yJvL z|IlOwGTJ5)^4`!@)h^H9u7fxCeH*&q(@h*xoQW~D28p-n*&c1|+}c?rx3BhpocP}> z#A{O`niHd*?=FVi=NOP?Q)vohxEXGV^g6*P2Ox9tx=)c9x&KGu7C&-DmRSSBS3%n2 
z;zq^T@TSkoNWN=h(B7w@`2j*Ssbsead?D8z+p1OD?@^?22zPPF?6-_1KU~NJh4@G3 zfp!+Z!ti)NqDnv-w(ZMTq~C@Z6BhYd!Rh&2|JfM+I29;6)R}^KG{T~xW8Qv#pOTE3 zwui6+lWu)5hY6Grg9^`!?~r{bIwYyX$H7@#M;}fq*2i&80dQoZJWW%e&?dl33l)aW zyE^|GKEP?VLk`p5#qBl|l~~nou)@96y0#rv&{y z54O!cvapReZVoz)NRPK+G#8e{GL9W385p`&ZXa9c%0_=+`GR%PA?wlf@APM)S2(OD zub%6*?DG|2}+(HRs@)&?5Kh0w^C&S)E$z|ZuJ{r2mzsgQw8FPN9B?>K)} z6_Vf^_R83R@&htuxgUf!r8i}4j**AIjy`Z>yUZJRhiJJLE2#4Pzl8o8eGQcXzRE~TG1NQh*p3k0sY+~Cn{0PJjP|FxM*VY(DJ z$Tqm>4|Gy?N&j>*NAC}BYv7C-E~rL8mrSBg(+>7$P@trq^eHty4V(>5zSPrvgPEKh zQ`kwX(A3c6o0SIny7oNAPb1p3r|~Y+p?p87H<}i$NAu;H7+TtZ=Qix+S6B8S^XjaA z65CIqh47pL11$K?wVgT1Gnf$lW>K*JcX)=8?fU~ zi7c9BS4pRtUyAyECHv-TJVbxJ zb|u3impj;1-Jpx$uYWZV0O~!=PggVR){{u5WPSU>TESY(k;J>-G;F(^0^q6Zcq5tM zaBPYb+Zkty|2UTJvtLujOi@?=OA2}jG-Sa;KwUZV0QKJ`$H~*Kl-SLOV-xUOaQaQduOEIw3vAT^g$48I0z{HcQ&;U zC;x($=n71={$9(0gm;cM#IiojJ>~pGyrJ_kWjM`!^3C?@)lVh>h;nuj-)(J9o}<^>+U9=GkubvO`cJ#}s#wR9elzN7m;3C(cZZC--WUTB6h%heG zX`61F^VywQ)x1(Au#_HoBd(RT%+7cj*>`1AfhGZ(0CWoq^b5#Nl&r?t;=I@v;RA7z z?!f%(aohS5jWRHs)d6eY6tnN_5{Vs6&Dft0Uv)ic_n{lV+!1UEsEBm&nG=$(guP)6 z6%w>-;eM`K^++W?2N=eq8?BT%>j(z+DBE6vQJ=geGoT8z^;3%|)=X2U$B&>BWQ*J= z)*Cw0QKN+2iU%+zr99~PKRLP3n2U? 
z>w{P2-2-3sdQ3-f@}!Unu}p=|u+6g(c1ax@PHDCJd;ca#m#M8nKZPY2oA03ayZK2Z zX7?!B-7VtoUZF12BM&+(`^bSDi;#>)kehH;qg6sw{D39?L!SRyHp@NIqt)ckteeJn z_UNlv8Nf;j!5`6NjBDPnb)j1dvQchEnX!*twJVety4R`=y|_ovW05_ z0Ig*nqWtDtrXfep4JJ@P{vt~Ov$W+MO|MxyKf*V1;FLNb|&x zW3tPSGW%HTZ9X~OCF;p4rNoB?6G7W@1%`!j%b^z;YKLB{htR+xE z>#JnfvQRB70G7Tl{r=zkBTFn4bXK6rL6!JrBkoNJvCA_i>rP=**nd|J4tBywT7x+BaWZum`8>lhte1Bs8&0YmQ1 z7y@K20MC3XKTxX1qj4wl$RuY(AZC%pBJ&ulF4G9%lm(0*ip{v#&ANVPSyWLEpfzrs z1t6|sI*FIXP$ESA%E!YiJ2hb`4T+xtT{Ct)N4d)>F)`;HmC2NMS;!FP?p zBN_<>4!6bu79*AW7)q@vMx*PXt~{p3Tf@Z5fvJGv3ssH_hpog~1&1?h9rkOg0BaTW z^b^0QQBPNLGCw*sCQ6!9fW%tS&t*|9@%u4Y4&j%v3FKerO8ZDM-h5+F2iN7eH1#Wc zRQYo)hp04Pe((_b?j(i4cSmIZ#`lacRH*T)kxWM`r)^Ewc-Y{3Sd#6UvYV}bPleF* zo)YVJcqkKYQRu$)&nQ)GN~S-|HXN@wNaldx@+VBz_JG7?y5&DL*#~aL7WMh)GQHfh zvoE*yB`KNKM+$bsIM^53w2%?(>`}*PL?5!4eG8+28dP5FlD?8DJKwwTm7rHCa+JoM z4X#in`{(@Pd_=W(_k4=Avq=P&$VPG#d-%?ut2B3(kAIAd?*K1YdHwuP z;Z)4+YdtPKR^swgbXPGX^wgR!JKxpzP4)br7eG&hCZCpxGc)_3pGSVT0JylCv$}YL z*>qQL)z8{MV-Zjc9(%^GtH|b)_4gV7bJB$Zg|0&w9(ar0+kU@amzzYM+p-kAEz6UX zUn9Huj6_;^(6Maf)1j$dOgaZ)$9)c{x%cpE?$hm^DKTW~kbLKu&d2hMbdqMfn1$3m zSPjf(6bR@psw{0^6UI@H6R08NksqS=s7^f|+HbM<$&(nR=2>xl1s%r+m0Pg57M_R{ z7^;BT18p4l427cUX?oC`*ui$z?HbW>jA|h-6!HVLTR2)ce~4p7St$bG(fV#(MbS=U zWRa5k>X!56Vb&al*SE|!*m?M@9V zdED}r+n<5M;V*FY?^Hw*1851;GR$aYXA%OVq-s%E;z3KdB6SN&3HHkaH z#V=wMo)ZhXu(eqK{#kHw)+_N4#GG&_yQwnbiO3BJm9Uq6eptzf0Sly?Fn4#)$tPlr z@R%S{Aqr{H5_Oy;OREu3!6Vm7x1}Gx!b3D&r!7Q7-;*8y`0|`{S{?1;)Vx8OI;r&+s_bLoMja zZE)~O-IZ0Wp!K=Y#h$H6tF!<*q~KQ}-f<2_b9F|fDy1f~fgd2FH{8HK4R?f>@C~r*PwJTfzToV3= zZDF;K+*2n*BLyc{3}s4*J-(K_U{8)CpyPjb^2g!S@xXq8RM8F8QE!967(B4?Ouw7* zb^%#N@=9#?O`7<48JOC!K2z0e-LCrv z0cBj^Sh^cM4-=Xj{X{oM*^_Yuow8H|E(k_AlX?QfD!y&mk*-^ceb*ZR7!Z)me|oYZ zpCR1#nCaW8nkg{3{XHnS@G4bTiWe$a_lKKVMOg(x{^hry(zijBAg`zzS#+GK59I{F zB?+qay3OiJ9L%d3SlHVZYI-fy`--K3Ly49lMwO@`ed3+>6vi-hJd@*rRZ)9=wW|U5dHBtJBu)iX#`xUQkxLqfjak)(k{TKbc ze@|>~M})x(1XxYOpFvu6JyxVK8%;`xlKpYjkzC4zjP9 zdT+qPZ`kyq_VL_|gpLxaNjK4jC&gY6EXGdWtZ_60Vd)I4$^|2HU(GJMQvMtRr`!+W 
zcoT!Z%Ntg^j=}$$c#kA--p!(JH1gN@*(!D8r_#miRP~)}n)iTgQg6BOVfrj9EL5kh zvjhLdF&gFvOGMuK`AkI)K_Bkq*}@8LWz5K%$UFnAdW1fA0yd7=4J!Q@Oy8FMtKasO^uM zG?XAQ_{&}5N2im>@4Ojy{P#MS6ywAFm%;pHyQd@k39xEEQ|e^OXj}s6H+zd_@0!Dk|_5@d7BE{Jz<04a&AyK0-W^zdIb`_Mt#ulVKuiPC<&%2$)Zm z8)Y%EP6!i|Ovte58n;;7b@MDtM8AVwV%y=QW}JyS)}VR9Ep?`oGioMZUG=u2K}sZB z7I>`xI+)v1-hF(7`YzBWL&vx&t0g{y|D}Ks6=i{N8i$dyZA<$op_7`;eYiwMc;q$p zDzql|iw_0Kbck8yFk!^VN6@ZC$(@$DUV6LGew8Lc_70tcyI`*>yi}e1pWNA}iyv?7 z9Pd2)pdXSz%2Ir8&HQDL&RL>nq7!5C9 zA>B)0K@3liT@?;Bi1|=rl@OUdXcc>AxK(pO*P{}6B9$_tg%)gliz1Uz3s|ZE~ zx_XJ<#gJQ&Pagly8#2pkTg@9hn0(z@s#GD$6Z|E2 zfFEFS%0woe2~5Po#N+GZYa?n|)fhTC*rfk+)8rFmQP2CFZi~C_m=_SS%ZiA12mEIB zi)_5X=9_h7<3H4bv=lViB||J}-i})A<7Sy|2bQ{&38KMT;sKQ#=KkY`ZHL7RFGH{2 z(&yZyoumf`1o%^(4vr@0ZpPL$qJ6SVR2joUz~-%m?qEQJ4*HTS&+Ah)+F_HTDnfm$ z>@I*Eth@L1arUO5BM;N=djsO!;d$V$6p_fM9sVzuw^90FgOaQ31y`myPGEv9Ki(#u z3&;FYIu4j6A|iMjf`pA`y}sUUbR@W2+5Xu3#&}9RCTKjJ}Zkv9|)3R(l5Ktl*M>e#l9@y7} z@1Nx=hHnj?|E&oy34VHqKq!DtKB%GD2(^z+QP?_2CzGN2^Zb_au<75FM>-jaO?l8a zz&ZmI3AN^DI8zxKf3!0)GoDVk)7LHAN_e{SlVEVX8VwMo>wb5OWF&CPwA&C3$g8L! 
zE|lsv8jNm7>(bct2`yop(!wuIpoyt@biItW>fZToD)@B=yf^>z(?@NgjiottZnwB0 zd7V<;LXAK_>e7TSIxarVb`1NMBqe?!dOviZip14Cl$alQnMiJkV#1b=tPi;DUj9{8 zwytwJaybQU=WK1jX5Ev+&b=iU>yhIDQNC+EW@RxiBwSq}pXk-={4+P_nl!z}5;%dz z!iA3%w^3C`>_)Q$^r48~3C7DHi@?6QdFo(c8sUmkTMDqQyOY^^8%>Kha(UvWb#SNW zb1`Di9Vh{yEDemxx2?%;R7bxW{wLUeVF&#V_|9(CTD$jl(n-K)Ro#e!7DI{76d^-a zD?x1)uYGTF9u{P08(yldeg1i!57~iFm^u+0L5D)aoF*!2gRZ?Fo6SojH>UOmX-q$pzvxo>-`&W?DYr*Q0x+ z%Cia)6?$1Dq7>XxI?!MVWLghDlk-U_hx|F>NRq{6Bmx2aPug1!#vsV`*_#C71vSm4 z17CR^PSlLS%n9<=fQg2Q4e{|)o9i}>(ZBn&1zT4-PXA=9dXHzzomaXc6GGGyXY84!M=_c)u%L}^k z&FDB}GL=~-0@GkN0-&3fSxv~VX}O_8EaL6x^>IM5F_C7W!^}hedlz#DgYbQ(`*EB` z$gytIFHp=*ej+#ca9Vx@70(l&s5MLpvn2dA02IOvOF}Rc`R2qC>-Q6upnQJrd#|Tw zMm0xI>g%1R|6ac+aB2*G)~Y%Z@we$1yqrT?g8VKEl9|t{nF>u{F^J?CRCp~*=!v_T zRakmSMJGB53Y%sM~Mw)NoZgGaQ$tReJ*SS35ilz1CXGSn$CbSJDtg)u%PqOx^j=Sfe)i;M1y}AKe zFH>6?LhEkzdwt5N4fwsuA82X5W1pu&;E{icM8AxoUqcSJn)#&b!r5j}4s2Jr=ylDf!(x&!XZD@3cbc7S}KUc+` z8jMqi&T=c@RKx|d7y*{$Gc{okP0#NsDW;r0zAvopQU9)=nBb|1Sc?%>%9=!f&^XSG z{?BO#Ea~95FTGR!fH6Xt`vDAv#IdC=k)!$D0y0xT={|!RU6f9+<1i_9C&VhLyZ0XM zTrqdka14nP6K009!Gv^wGW&c&@yJ{I=Jq3u?g6XE=&f>!IE6&2#pa=@{-YO>H!}o5 z8hO|RLUGD_j>-8L1hXjd16SHmG$m`rBuGtcE>XJ` z={`JmS#BGciV378d62%v6|&dtvC79N_47KTuno3AAnKA%0m!dtYuZ-V^LzQgH8k*@ z8AS6rZUe~aoW$~1TJxfPEjAigF!`ja&g-{)Y>u10Cc7LjD*KMZbAL+ZPJ~zo**Vzd z;eC(eKbJu0Bd85Xc-p6b%S{m3VjC0ucih+|yX(!5-;)o0*$~KuCBDwYhtcHBvf+Ff z2JH;qr8oyW!(pbUKBm3Fd!YyU(%vNwBOC+P&gSZfCsj0DIegP?o0FyM&1;Z$_=Tgr zLF&ngmsy?Hd!m;-QlvD?g+fnLDX$Kw(a6{hq}Pyde+%rSf%oQ;Jg_7J4egF#Wc9?f z{!mzjD}VqwvNhBC&C>sm9W7%);EvSM_p6Z+naHMvfNxJGz|B}6@ZfCd`|sC|(1jiV zo>^|if{!8|Z(5H#Nmxo#EItNbM|h$|)ymXTJF657>_^JH63-|=IU7QT5j z!qcN9&w}+&qK+-FJ}qGD_mM~RN*a=9$u!t2fnnl3N}U4!>YwoiZ4gfu&juWHGfo^V zPTmoGB5c%M=3FxbRgY|`W8HW_fnNpi0K10PvJJp*CBLhEk}<^dzaMoL5Icl@|2S;? 
z(8&jMp(nd7rxnmNj^!(-&pJ&92?-OsB>w@D%4rLV zkMtwxDW>mI5Dx#s%XkCQCBB83oJ7A8M5gT%KFZ(1H{lAoE?MD&C2E0eXX64BbBVi; ztBhE@b{dV|c|BfGVWqCs8m6)e?N>^>D`xXuWFDZ@fME3U9vup!vJEKiz2lrlm}=v7 z=Q|sY?3CB><98!$S-XUDuKB%=P!S${MjWG+)3}IXbC|bt&jNjYv z`0gO;k7l<8EG!5-5-u&d&IeuupL%SYCJoUwvn7?@LJ<;cOcm>CBJk1uyKP7L!EKJ| za(`CRb_8x*75IsUG6xt7|JC%EaiQ^l_TW^oTf3KF>jC%Otv%?H2>%|v*uRmZv}a56 z1+sP|GE4jE@SBMBeC%@Hw<2QJzA178ELI+s2Sr0`s&0E8QW5*vcCr@u5uR2t4*YZE zdlI1QV31djy9}$qO-HZ!*i#VavgY??4YKNzN!EL26RP=q&zUIs+JxFAMZDQZ!?v`a$`Ha;F;^!6dp%us}t*FD?@hicb*Vwz>2e`qS z+CrVjiFO=}nqWo5kqQT2jB1Gd8huFf8K@c_IV&tZ!3Hsv`iK+7)DK3n2^@d`@4d1s zC3R4PnXCf6pH>d3iy_M(kN2U?j{-uoT*-uqCOt!blib53|uvE9}YC1X*$LFl(^AQ$DfgCR|3IRZULLtJtvOK|j>qtHxdHk8#{!aOV#3g&UOt z4p(#FO-hEywj!ZQdGM=mLw4CArB3>P6F{J@LL<4xuD>rUKC#DzmyH4M4F;IwyXFHn z%RhKx=+@ZcHoX9Q0g`1sC8+H5<#=ba&nbh$`}z_$0pQ~M9qN~|Iw6v5ovvs`?6CA5 z4i~5kSYg7OEylQD`NF0WFfNU-IAF&3bvIQ1t+v^bfi4CZw^q$pz21_-S!uqNP1ii> z{<+%rCn{2^)15cIR$|sIyjKW9aUT`p*6d;B^PKp^cdgbdEDkrS?G9I)ynQwn&ew1# z-oZw+1K7!P{D%}mS4>Z*{`H&L@BKP3m<{zW0#%eTG>ZTkV&nh;Y=i)QIjfMl>o)NT!dis zeD_c!@cK!L11oA9vRnHIRANxKT*k1%EHXZ2&B!#QTHNqp>cpukD+b0Y522hM$Gi>m z=0N=c&h6wiJAj{q`)6?)P43jxp%c-ckTqcMljydbv(P^2@#(_FPR_lvhdH{MpO+lN zfX>k^A|oy^}fTVPno9qirz+QRAp_<*wQ=Qtm!9$wJ`pF zCD1U`xz)5#{BT4L1)t9O9)21S3>*Zs0&#lbKBbOo_+&EVOg}l?-poia<-Kw3M?)|= zMvz^2zkA9*7$73%O8CSRDXH0T176!Rh6xD-7JiHyn-9v`GLSYT^Vo2*G*`Me*QefrR@J!cYlL0y? 
zs4`yR*aMeZ^Z373r4t9b(A5OniteL=gn4kl(8=3FyH$)Lol4xVwL8wOg)B#f#?R<)tR?IE6v9-(LC zTmAkz$!DbU+#&yQaKMpnMO3NSzoYgNRKB;Qxl#E4?B@v%Z2@eIkxm*dx31Q;7f!R0 zFD?d@1WcJ3Qtu1>`QyB*Rj+S%pUDh)Ebct@*`;NJd!$$^NF6r}9X5b9q;vrktvP&r z?d4y>hQQEh4@RZMz3q1Q`Goo8XFY!+ocCDyz$@K*E^E6)FTOU_-jPQxp&ii+a88d< z0z5M6lT@KhtO_m8vyK49_FK#SqI?_AR^Tr%ynnpQXzkXkL%kttJx*h_+^aZ-3=6Sf9x8 z%(z5tWxj;b?>>p56SY3ddjVFD4nrh~X?NnFJi&+ZgrXzU0;A)pS(m1Zp~3t_0w*PL zWP_zGf=iP$4vv$#nc--&0K>X+EK!6l1OCV;*3O)_vgM~1EfWzb-D1WHRR(o)rC}gE zL}AJqtXlE zN0?8C=tF`Du>H}4!_qU#^Ic__9TN#RF4?MsuG!6|C^2$$)!5vc*21swj_fLX>SL_b z2_83==@&<@%x(*67o1h{Yk^!oSgI{gK3mji%OOH|@g@5UPBx7=Es- z=-jfBs=f7{L2T=}%baXQrb;okjkwCK2n^+Kw6-(;pBLb7Oy09||Hn2Q2StzrbX=Nd3%kMfu2kzMmBJ%XtEHBk z97IBxlE5cIIHLrKE)Z5WP1>1DqYX3VhD7H7Wm%NGl{(=O@4YuCzX8C@e#3*pDveGm zqIwKSQ7)kG^eAa@oe*fS*KB>gw>=~=Uj^Ru@l)xE*0iidFtqzEiQ>c`wUImtE@4p1 zyae0;DnTN2HM|K!THttm(hL-eU`4HX!CPvtzh%;w)8j6`5)ozwunhB~tFZpi?+>qnuaGi5`a3MiQ878*51lF?n+gSp2zJTM@%x;E#tn`A>+a6F zqEz1lUyBI6~LBBcFY=7SD~ zc5VuRCE&Neq&!`l)@m6BU;q=4IeVq=Jq|2fi>$D!_X;-hYX;7UNU|sZSD3Q!om<5) z%J}|RvE<|4%JN0u`VW-^o>fcmtkz_@z8e`6AzZV%@t_EI;Gc^?Lm@Jmj5?E$#99Qy>7;tk|0cO_$}hmGQl~N z@aYi@BLIisTiF6?`?+q@LVjGVB@cBd6Ic7YZLo#W8F1@Zcygpf7arsGQ!!Z$<=737 zB6^lMp-np=_mAZURwB$m0@*!o8kI+mKxCKjPwfc;bRoLy2^Wokwq6bHQb(wj#yqum zNVJebctT(6hmc-b$}arI-uFeO^%WL#*L8NR;G6BFMnv^@5+VG6@5<5E^m6v~n+^Au zPZaCAr=~pUWozfb6Te4<*JcJl`fr-YuNXU?kAZ7rbGkH>2d*${Y;?TRY}cn3OQ{&; zLC`a`g4dwWp_Ys}10ClPGSSqFdqM6c&D$$@;_13~{(3=1b612=G#|j>kG#%8!7I~M zgijKSdj2Z#XFvRSIv;rWqxe3AK}yX2tA<}~K!8z^^Su->Mp4ZL+8(V+d9 zU1}RCP367f`vYVH7DP*Mx(Sr39a|R>)Vh0;sO@j{^?<&hR=ig7se{T7EC3Gr5=Ynh zhWA?PrIhj}@JO)~8JJwB$f%Ofj3rR|9#)he@Cgd=Up)N_{^O6Ac&b>|PrF}Tb5d_K zIzFYG3+TT=w;qpb`zRf(u@IT$`1bo!6sz{%1-z1+UxVp^ zzrLqrAfk3Xfa%QK&GvG^9x>m0)k~#4kU@vKR|gEHmII@dkR{MOuuFgGJ~rzA?ubLE zItz9d^3DM<-yP69?Y%G0aiw%rSTrBTiiJ1d z2;(s6hrdX#l_vk}9s7nmglicD5S&3c?r=VEcRGTYdUQrxguBh4$ZOYSd#27%r}9l3 z_4&7kyk?(i&#tIxaPi9p5l&QI>(S3Iir`bsWp#oEOA0tjt2W5-{0)!}&GoLjpP;Av 
zd~+s2U2`(4$yE!Z?4?c;Zo#6L``;PxcA%FbsY;oAhobRY(Zt`v31C8BfkG`(1A!ln zmpdR1en2W6wrC1~&@(N6Q~>vm#9?`sG zaQ^tEbDI#4VJRgrlrtV#-^;z!0=y^~f%@gtDF+*@iXm8Kj>GP_>tzfXcz!Sa0^c^9{^63D`pY z#Vru5a8^U;Hn|&;@B%_30>OT@-=->r&5VmMt`743r3o+TtL8p1A?3Nq!?LGICSu)W z0qxa17q-kVkjeSnZwu2K6hHnq`Rpj%G>eHKivz0A+Tf=^4-AaAyn3((GIafbkzq~C zNWcD?<#pyd4N9L9OQ}I0H$|!KYSn)FXp`d3`J;~)hdGtk``T`GIA0uz(IkR!e$oh` zk_OqloZp@mEmK@3ueIt`+6-ZJ4^lG6NbnsHfJb$3Rd;Er7avv{_X`7UtXk0|+i5#> z0qMk)d%V}2I5TAsIRZ(BG$b|%bInI^8UJ@9Ajsx$aWeU_3^cIQ4=(2!5m13BMkf-6 z+W7gSw6ZCV`6DW{=M}a41_U)wM-RfNMPx!XM4+uO$ zE?Fv~pNQHIf0!>S$fpSd9{$0u2$rUecRA)&agNqcqaVeFprc7T*;oW%i6Hq~1tN}3 zFrx;yr4QVD^wolC-u?hc<5FA&vJ9KvCmTMFNh~|k75keba_2kXAdx}!&VW@Ja&4Zx z=mZF^rv=gx!0~*$KJiDU>Tihq=?7K0jU<{3N~u|7jN%P}eGs%m%%+Hn*3{<68+44m zj)ti@m(nzS%Aqp5zP-bg|C&t#-~b2jut)g4(zNqfAaS%R&tDvJm=r1GjALemJz!U*qEh3*3r_u)s7t^*Vg zKx1kRa_K9qmIwv<*0&tFZWXg_LfO$4TEEz;7UHHU&8A_Luzh z9M}%83inh!Lcy^FoXA=>UU*-A{}q`!EgBC!H1&=$vH^EB!f6OPVWVbx|@oS z=B|C38<}CGD(f(Tn ztIr+6VCxak3D-x@(d%?A8gxED5(q@-{7=a~h9Hm6-qA~QxKM*a=Aa=MlwwUJNs7+4 zqbfS^G671am{oR(8B*)J#}dFF)y;i3-&vbR@Nh6-mghMYmsZ1De)sQLZ?r;mk1LEW z<8&_3%Fq-v+H1g?<{EaxKvNDsLQoOSZT1@s$a6EwUj)7ZNp2VV z2;)$Kjj?8lF&vh?y~Q8wmu6vD0F7BmYyJq(|}A-H;)To@Z-g$j0cfb>tM5 z=Y71*%I5IDq4@uZ2P}JKIzn#5*0=}~SaU{U)uvim8~!;#o0w zVpLfUgNz1@-R6umtbz4A7EeGP(Q}A_MLXnf>mNXEc8 zkp?_mz&-2wHUL{YbdkuN(LUxH<9-}pp7dU&`vhF`^l!;dD?V%I9@uK?l z2A}Hl0>cQ%oPSw=K}X)FGlO$$WeRKyEA+dSvg7ybEc%`f)jhpP|9iYiQD=K9YgYXv z(U9-K9N+bljEOBR!cw#DtPAxRqL@2V$=!D|IQ0sX5o^Fqg~4%YA(xaQ&0FG3+3Y!l zY2n)Y&Q~LaVI)on#?6URhweClcQ$q-$z06(ogehQFnSAI10E@vWc`bHp~$#y=W7;w zHS>;nicr_+Q2@$v^B>_**(ItR9UyztCbo+5RaKWisEkaZlEH8wquO@h?Ro$90K25f z&2F*VK18{5NS|Mr9%~7sFqF&DgWHu!4OFxHU07)oYBCHZ$KeJL@3!87_kurJPopj( zQX>iAoDG`1foBrOuO+Wcvub77>!LM?0O`Df%ragf5gJKwJ!d0$} zj@VGO<*aHASB8wNP7&-7-i-IY0-c^yw2+GK^{{FnhLEE<*S6%{;(0WGcTHE6$-#2c zy`0;t`q#au%{l*7e_&Yr1i~+Y6u>(ZXJO)CQ1qnBa?_r2!v*e&Ym1AzeR)`8U(W3P zK+_4FL;Cg-4oe}m$ADntuTN<{t^_St1e-nQzzU1C{`v8F|2dHI!Nqnti)@9mUJmwl@Kp0e3DE%!ooWnf{&ei32n~o6 
zO+Fr696h_3gn(ZKjBpvzYdvSesGf$!tTM(LlcFNge{DerI9L7lw#ZM?8r(6T*<3WX z1bEy}+%`M8-T+VlQSj4Xp?%^-$1WVL)2)c!#S`^T5_2VJv<0OglQ*%uKb#-q(TqW0 zf%0#4(;(R_OMKk~0oNbBSNxi8oljH`J=vF~X&LRGtAq-lzOO2u?EIYss7(-h{d8FGVzANlpQ zZZ!wF4Eqo+MKbC6-V1|vP_f&Cou2!9qSqp%WaTyTm7Kf4;D7yrp-2$3I3AqwFXCdQQ8qBR2eJMi~8XooD%Km)|CVd~fFt`e3YJG$ab|0ty+77G8A z?=5w2?R6zqb^6CO4S4%xvO2O!+O)EC4p}9{Ym4+7XndgRUb!*20sGrAdja_25~;m< zluUw+D^y7XbAY+{;jZnBibuN;x$1al%JG5o%x9Jvjj0G4uV`efr)IDm!!#$Rt_xrtW@%6!t7$I?_`^q|@*H+kb!m(j~fS z9qj(=i+b;pqqOQPs`k4;0ua4(J~jOPn{OpuK?6Fq`~|W=C;5R2t6V6nWa{=AXb|sG zmtnkQf!k=5Mp&nU+M=Q7Q{-S8kh6qs0=s);TbH63x4`ZIiLzdI0|Pi-&-k;8(*+me zQ_vgiTpwM5#!S6uD^g6VUzA#TKwv+azC-x$Z>=>!r=fulm`?TQRX+*&%?f2B%Y!=j zq-<9EqU1WwD8cTtf*RsDC&a=M?^9p*qI|G;i6@px#8zWQjVO#G#|GvdFQ;f_YfxqR z_XHy2?uPMRO+-rAypQ>g?q+odfhA|K;zQQuYeT{EQ2l(hL{Gf%nkoj9Puv)$m(Z?# zhc{Pk_RD4jXbf&b+&}V#OI!GP5QdPuTV997BFYd4hd>> z$%C|%!-IQw-gdLgPSvpuu1TG_)q`ysb!Bld!xE!ffMaOlSFgUtJt33}C@a4=zV`Mx z4zDC08dqA*)fkE@vbXzH8EI$Kl`@;~RNd)-A8mm5tVxVEa+hNHZVKSR>1>RiU#Qgh ztu%wJ1;$f=vnVG{y};aqPvZh@C~y#c+ZsFc{P!sz{cyYJF(45i_&EaY^{!_*DkVl? z4k#U)pW9l^q)H$td|&J$9fIXxt$&>+k&s+)rE_^Hn#Ef6fTpG;daWilGO@0U;_t2~ zcYY*UAbC%KxMlca9~1g6zBww}cztb+s*`nuGu0p^BDPU5G64eyn#XD)8RKj|2v6z! 
z{xN#~UZH|z`Y80LcOv_I^huQcAkS!9*?L&goERNu$CI6*`Lq-Bv6G3)e(+ZR-jHZ& z?Uj*UA)dZR&%Ou@hug8{U;99^pArC=?me4~V%ojF3xOl^^fD7r)O%jJu)W~3>R7fs zc%HvBEA0NGRK-JDlCZ9OAZ=W3X+_f~9v0+E#8i-u087AkwgSJZMV<@|c3N}(tRS6S z$uD4${10j&ngF(WZKIhUY`tO;?=sII)Wn^!8y(iJ-H;xm>)hOI#*nnWie2zfEOCR^qpZ^3dy zTa^GGkLEm$`(9nSS)E_c=%Q(F=SMK^Hmb3|3#I}GK?dro1gR+sMg+aTWE0Q2R!`7f zKh0J<$IJHM-P>=fnYa6732}+!DRCT}3B=2CIhqkyJH#?{MjCHQvUjFj^O)G0_St!< z_*mG6;>wi~;rxDdz@TpppQK2%?O9jNk2ERI*?`mo9A0~_^y>=j;4-mlhwMeX=r9CG zcq>*3@xz<%-Ef+E8G5Ao{1ezXmT;NGq=ik1jUSBLWtTQ#Ezw{y^?vdJEjU4JS%JMJ zIxr;$MlUiAw~bEeRL+m-u3FDO*c$7nyyFPW-scJqJSoVC26G^qCxx{~DGcax3KT-b zEn^r04@?Z{lC)LUn6^4`R0FM@a?}LhK+w7=om^%SomlM=ual|zik<#C(|g1MGYu0` zdNDR?uP;g+Pz{64ZN9W0Az~xDnKP~f6OJ36yz^r6M%sQT(xjpEJz9Y^B=TCuku#c2 zjYyj|zYDp>dK(v2$*J7rkec4Ud)T~v?q2lw=eH5@r+*p6pU!O(&5deKoFXcc9EPgT|DL>kBE%-i&Hw`F z?mob9v{tg%SL3_?N_D3$uG?!%&E1|&bY#`!5VSW9SuYGb;3IspWt2?xmofP&ZnA*) zyE%q=eP>rx-)~rihwH28K2P>?^#Aq5Eo}{G_4ll|J#?~!Iv@whhms|H$j@}DCeNbM zzjEt3*Lg`CP;CC;QM!VYatO13Rk@K;!@0F}`1(@Sq6TewUt0!`Te1ju$^)KA!IZ6o zqO)=cRM<6-aT>54>;PlQn$0p8?vC!z6R>e>z!G(;(-&E@1LQWJ^v5qYRF<;+NXtbJ zQIrwEYhN2tF2EU0WZ@K6*V#lL+G`#ZVeR*8Gx zSQv{Av!?^iJ zqO?og_p)t3?^p*g*{n~fx2!6=PcDVVH$E{UR5$+u*6uub=EBavE|l-qN$fK23&ujn zYbZ4>XW9OaaMvk*78|!+`-7;0*AIju7U>`xL38xz;swT+#>&WWj`1O>u$Q(L?|XCv z-V}`8>T_&R>eZ=}>2j;rHaual;^(g-B12xW7LFz;^2@QNcAZ^(Bs_iUAVBS#^{wXc zAlJcaZQk?H|41ru_jiSFxnX?;DU+mLUIpN&h;$0SxH5EN1Ea%t4-J7QS~IATGL1=c z8iDmxZ_mA?{_s*>wvKvp!~4GMZ6!;g71+Z)z9c1gS6%}>`(PpGtlQG3&c9vMQKNgy zjlS0~%e_gHWOD!&_28z~GRx`Ft{}KK(zVS#pub-^_1hU}qc$a|)|PD2bDHPSmle8` zXH;MDoY&G^Q=`N(kpEHCq06Ye%mGi&I)DH_f5534=|raowEhzFZ?&afzyz$}CAc-| zX|Y9yalsH}RdFV~7zGI*ivZ~l5QCNg+5_**h9OX5+PIuMVIROi4M z%kbqd<-t{Q9G7F#+1}IT za(?jYHalaL6jJb^-%?})57T`c4`?f}2$b#>dTIZx<>8(8030f>TxjZx3l_kfiV~3G zDYMEE#JiDxtqB$GvJX$bLKD>TvS~w0L$8}7Czj|&klv56_tfuLA`eAoc?g-J$eHxp?XzD67iFW43- z`vjK0CvhG;sCStoScj>KX{95wOTz3qlO1%sk5}4Rl^Yeh@DyJVDs6$>9xo8|6I6~m zpWfY5z>5eV6uYfk4k$DmxuIX;+!J3BhNZNa@%~{wl+@F3;Gsn{jtNyiri?1~z?Ul! 
z?YRT?b!>}b@rZ)2zV*m4x2KRhs~VJgaoBa;*UIHDD7jwg=20IKeGfDcIyoM%>t=tn3Po5{W9vKmcUmzi;n-QQ z#iYalBX#PxqVagN!QK2OQ-5GTcTTP)aOqu zp}Ns$9*H+r=I>eVybVb>T?_#UOLc$@6MRQVo^ACpoqe&${bGyhcM0Zz8~*tQ>Ad!H zDoX!iVz$}h!-Sg4sat-j!xuoOFM+8Chj~!HWp?x%8>B1T;QHn8#9BTAsnUptr_Dfe zP9aVi@Qc>)#sOzEzs{9EeMluv&&Bpd*oTzfxX-uIx;^Z^`Lg=E7YEkq2tzG?Ifo2t z(pm0jr{!gT4(<)TC%i&gSnLndF%^UzO5>v?F2l7#ogdGBYI8ANHhdgalH_Bl%sK{( zh;vifM;7XF-OQ>-%dnT){GS(KUVq2D@vY5L+8eid8xNbMPx`#BUv#b~QTp%_O&iFA z(5bw-xF?r?(EXAWOOEG>v4cI~{uxr)QbC8gQqo5|3-$NbQA`S3f88q@o@?n&qtnis zHY+o2)bn~JzhH>6Ub}}?&C@Ss(Vx&?1cT11I;i_96db)x)Z`@KDj30mPdTCC6czIC z$L>&WhxIO6XD?;zWxl)=>Sl|=+GFV@>gRlN81^BQ_Utt|Yfo%PaI;oeM+n2^8{WeJ z*Y^B}mosH9UnH6N%kjgoFGYI!T~92DBR6JhOH(Iq?G*s_%fG39nP%B!6z z4#UiJUb0Ea@fFa*JnJFFfwQn!hSXS`&^+!2uf?#`k@@5C>bJ>Q`6;s1eemwTj5%QY z#9G-`t0kF1a)0=T7_ERgFXReqTLwbKsGdX#?}3Xl{=5H2)muhI*?#Ze(nAj=HNe2o zC?Va=45%~`(kUQFD&5^(f|8O_iqa|FD5Zq7ARPiC_&>+{_kGrSUf#aA*BY+t+UGv^ zK90|Uw6vVyHK>)Fsea6HzTp2cfXOPOsfQ~2_#hTBwP1|jgx8fZEl|u)qvnXxpAmXR zNF<^yjlBwyw?XamXFeumgZdW+7RNP$n74WYtIpSUcbO>zLEVuEAmN!w0httoU^`Y5 zVb@y4hJj_pG@^xmu#|&rP3{}X#!ci2t1s^#v#w|O;a{$g)KjD8L#Q^YD8^FBFF%hX zP$UY!6vT8<0&8DFpzfyObP0b@qQ5r@*c`^@idlmX6?*-FxU%wHOdrf0aD}&xz5)=a zF3#6R(vO=ydg0(drq23>GewWO3Hc}!PL0&{#p=z>-(pJg1?(1~L)`SlR@cGtH}x#h zWEskjy-JLMfb|F!a-Q%L%vPE3_+OnKh4+yYgd|66MJuqIDg=iT^WeT8Uj7VXdv!0r zs-t%DNm{zxx2s-^9rEk9#kqvRs_?#bBjk9K5h*|T(-f@v?Li5 zjf8dmKb-W5V&ai9rG-Y4VqdeDtNHVLcd(YSGP&C7}S<+jh?Y`;a6MEL-Z`dI=Vvkhddy zI!j`B9c0}`ys7k$!Vs!cXjQ)O?om<;E^Y8^4*w@jj^>E(L9mtiULDv|a-LUxJX7Z( zuJ61iS=d}TgOW{`%63mbbMY;R!)?V9^zUX)w;%p_Yuu#W6RkC004<)xi&IY{=&DY` z=L)xc`2rpCQ(^F;w*$V*PY1r+LrkZ*Nuc!fXoW}SvA`N|)Ur}5^*(oN-c%4S_0)7+ zs?1ay7ENp7myC@`;ya(dbou#% zL|`tj6X;7%#K$?rnsFmWwibO)lXz#1Q|*}&&atWnp{-zfHl?YoAB35U?;YmH#=6j5 zkw|TEW%`J0uKii;?p%i+$?!^5fN;+^le%g~yzk85k|Od~-qHt;Z#DJSSIHif$qr5K z1?$6aWBQF#?mJ|Z@d(HMeMa(Dt2N;Z=Og5aA)oPk6u>c7oW#C7h;wO`o0ZfNv;Bkp z&di|Bt;FwiCGs9f_F9gt>wx)Td>nPnSP5F~?gQLVRSltoi4WuzFadPCYsb<4e8eJ&%-2{c()S^xm1=~k+1Jvih 
zr@*r^@Mf^wRTBzyrVsIvy7C3O;+FY}ck|%2h5(@jLcU+Ko#q8F<*mnB)QF03V$@=8g)|kDQKv1eZA~Cka-; z=>(I9Xhp1a|4BIEoVY4prkNbhynEk9$-SNaS*sR0!d~Av;8GwA;Yh$uJVTlL$ob)l zX;=1T5GHSuzxvUdwvb%bs?>M!qc^Q`78<p?BDXi19v^X%*=f zpnA+(E;nDMyX?-iwQRDvd$~wgUjsM#w$y6g_$BZ7gR(E>OIx?PZ@bZ?vq$!jcDs(u z)w%79Z~gFr#XFRn%Zvx-I+`7uiszCWlgIrmY*h4)#N=xvR$gWnrzB`FSBu%be<{l9 z?l5)3P=D3QD}JY+o9taEsntiD1NQmXepSD{_x7jjZO?ahb7g)kztt{|c=Q2)3+i-A zj3&R_nH0hkhNtL<+d?KoRM6))=E-1>H;FvEPd!5o z*+!|~>-G_+wr5`Wlaaj>B4f^aBt7NX=!$E$&-8~2)u(~#4_hdURO;y%yc)V1Y3+W& zOt5&qEm+XU+saoYfVcy-;VR}}w?k(`i}8OEXL*X+uXMkmbf&!IGB=6|0?`0_nx zMh{tq%8yI%rWDXjAYoiWkiPRslJvfKI<8vkGW>Sb?cV%X`y_*Yn&BC{}naTKqoK9!S`vScZ5QDpZwi(z|bmlNKl|PRo2Tg zMYKE!f*krK6_IH7UX5X2oUQ$)QDPcrT6qsEQ&*R`w6@pmXs<2UdQmI|VPApFf3)aP zX*y{C5{3x3)P$#erm7d5Bkmp%PvSJ+(6)B@%S1%~#+r&GNN#{z+sRMFIhR|wTzhW@ zH%j3dGxpyk2S`0I&}dC(RB=J0lfuhX@!9OugFKWQP_mtQ?k7v?Sz=lwEt9(>%*p43fIxK%&#ej9xD;7bFr2Xrln3@4x?5-Sf(@(iC z52#XP*RRi~^CQ3Dmnht3Be%w>(9wMuNrI!FPJR^0n;?<&o<~&=J|fB))>Gj2$)L-t zYeEeCQA-@Axz&8+JuK!9$8ckxT9-`;`ZVIIP=(ENH8?s}#F&f5^>-b#m68O3J)Xl? 
zJi6ka;5t-uz~V1ApJ`Y`eg@p=g=(@jUrEzhm1wXnjr0T;VffqUfYQG07oAe)lFZ`v z%h0a;w|HA2M{B=vtarG0xCawLCGOC3nrg8Ns=SRl+kQ{y?}F0?(fFl*D7YCm=gt^Y zB(I=z8Y={arkd)pHF_mg|-H|-}+=Gpp9O}LBV3po5xh?5E(!c#oEh#+} z$(zCwiNUJmWqsbhwsKW+lKGkCp0L`NWV&7r^lPA2{>R4VYKEcSX;|c~!!o$2nl0&V zN^CxkLToO}!K7FExIi=ZWmgHSw#F1-oJlcvNz;D)Au8_WUJ(O&+tK+eplc{|9~XZ0 z5(Dd1W(*b24?$~!ez{keS8@N{jS=8({N2FjXKINXW8v!v>auW#t%b>se-WRKF@s>QEMsLrzvneppj-XV>5E`+FE>(?ovFb$cc}f zrG}93&`~V%QK%EAJ?6AxE>qMJig+kaAapL{Llv(J?rKCf>UuGLNxskanmwluj{1dP zBZ#}pfIP$PVJ5G3%cHvwv6*)JB<09>VuF%wgZ#3EAR;hU=u^(G_*@z_lYt}ycey26 zaP1928Ue9#AZP}QQOPW{&zVXXRVZ0mKEeA$SRLW%_002P1EmB0jS<{wE*>!g`LLmp zp#H2I^B^llA8Z3dYkZ5(ii})nD64e+QnkHBL!Hu|d?qpn1?2)dW1wTVkmv6}KgMT;W zPrT?u-G29wv+%1N#%i}5{~i2!7O8Yt!pu$Nt(XbGPT-RN>E?K(UsGQP5;H2IS4kSv;!i|d3#g7lyj8Gmw!Fzw0Nh|k@Jdk3DaWe z+D4=%D3Y+o`P+xL60F7;w0yi$6iY0^e~7CZ?NAyJncs{gp``WHdZhjvOM~{RYhOp| z&VGs}sl<=no_Z8kxhH*ELu7QETeZ-n`HO>TK-;+u5!3#i`Z@k=TmO*fF)OQDdbeKq z>jH-`GJQ^CLvlBq$S+|}?&z1*Q;z8T$l@LUD)cbm_s?&di)wGS!i}ZtG^yCA=rg3v z)HY=0)i^RzwQ+_x<%gEbjYk(E5II(TBiU2!CX&m>Ap}j}lCV<-#A}ke+YC;lkp6*0 zNK;DPM>u_{#_Ro`6Cmk{7la$t@7uZfgj-!lOVQ0>fvXfhc6TJYdUw-xCu=G`o1{R% z5^1U%*N6-46(dsRE7=US>-Vr19KyE24vk4^|(`{J$%N*-DC~1fikLbqX1;1 zdW}mgL~VD0-;Yu6u%!^}C-ut33X-$%DulkPM=-Nu0v;GL?Y5zoB$a{;@fxyf&fw60 zl~48}S4u;{wA`R9=7va*C#f_)jYpU3$@OM7r+#fA<$(VkGEx6E{p0Hh)q>`9q4Ad- z`lf^FNsq}Xj_O9$Vj==NLr2VC%+!KR1+J^;sOx-GqQwr zSq>_!ohJn9U_kZDWz!=ZQ7*M`1oGB@%ULc0$FIJJtYU#6m|{pU%=Mxt#CUK}akFq~ z*9aS81!J1-tVD$|r`-QUnSjK_w7ES5kCRh?89!l+;G)NH>84@zh_Jp9x3Rh`YrBd4TaBb&M?;*ARUAf32a)Pd*6K;7AA0;Yr-=c1 zt6(tcInAe=cuPU3>AKmvjIpA{5#R|{P;}|F$h<;gXeU?Xj+BG499PwGbMyH5)8*<2 z$&)*H4)<%0p`L3XXSb|i1vqF4Q8t6ZaO|?*54hu&X3bt(bg{gm{}Nx*aTJm3-AAkN z!&8iboi==37C>sWYXbbm&vV`1C$2b924pt}q&&UYA~k%!XE)y@`%^FDK6^&;YBXD@ z%A+RVQU{kN1uC3-cMKjzH-vN{W~O%Z@gY^8C_<)fh~3qph1?;4JfV(V{!%jdaO=B? 
zl4`qo`e0d^`4h6Uy}Tvgas6ygPPwBS9dpd8RqB6sJgC_IX)KQiDT zKDcv2c`^rdUfVCXpr^;{evxqH!g9fkf__ntioiDe3MMoE9QtZ zA}({6fRYV(oZrD)y#HURg~PPs8l|+1Z-)|gC;PF8=ec+cskn_N4=^FJSa8Q6wpemM z*u(Jp#MX_}9iR!z0atHVL&yin>ehLWZ^x@vHY%xi5AL5qFz12mQZNuQrX+GR_i=2v<_49kY)ax=CaWCc>HbH8mVkq zMT(~LO~UFc-@|8NA|TM>vYmx5m^?TsH>s9WcJO-Ol7anZHfTg?^b*YXF7^d;%LNLP zU8FoN^8d76A}@UpR190Gx?LY8mVtDKKpL5cUcgv9xySE2Cvr%Ki#!YOA>oc%;Q)R2|PxrK|52I6LiO)&`OHrS{ z!#F_tM$@yv0XiIekLN`y$6r!4=vK1Eo`6qst(dPp34g8MaeoVlp)NI7=b49YbtelC zLi&>);yt$nb#$NrLbF2iCOFdN6IN{9Rt^FtAca+KOZTQ)W&Iu)Nf)RcVG>!^tm1}G zpq*ge4O4>O5mQI}CSSny3WI?oQ{1$BQjf}Z4V{QsnNnvPPiGW_Jq=OH8P3izEpusv zhbfMm#Ncc?f(n?`6R&G&CR|!u@%}>MWwQ zC=9vrusKXFN+wFNB2in@0wk5ygM@WGWAggF3a!r^NF}$s`V10nmI|lzg&CUffC+{3 zKCV>ptxK4|{($&>jN8x1Xn%LVJ6S<8siuN$_w;*3VU_JX>eY~imvM^}*H@+75=y;J z3Kiq_Q><0Y)2E(b=1|}@(zSl=b|L0=&tXh*)R%fUVuzQxtXDhhndVY>eMxBs;fAo) z_5wHwfG~+Qe#jKAd`y)}L7w>cLn?N;tDZ{psDxK2p33gif#PPO;vo$crfaXGaKDe&`7)aMxc(xa`Qat~9dI~t3t`^TpGuObbpBPH(R{SZRliL=bQ@5_^4R1)aem=b81 z?`QI!m6#Ur>}=G_eiB zBJkzS@B7Xjdm^K*+j=7PmrcOVF8q34Pv4}3IWPIqBW256uK=09f85z%T!*(NNY-e@mDH`m_8_iNMi{v%?~Y0;!}!| zX-WDH#FDzaP3tL1(j+vUG-gS@oEZ4=&zoR*WKsaVI@&;zS-LvT%uF2IlGY(c=TsIXN2L zN>dZ(_HFjJM0ERojTY9Qkrf`kmAY0+isY$%h_@~mF6YzLc91ZFU~{qrZ}MaN27AhQ zDnk7X7Vp^MY@6}a5N`s|QDCacX^)`v-ePdmeQvQ!TPVJmYxb%vzqX+9zHviVoKR_=cGC|3c1=3%iE;P#bu$Tm6$q?5?HLpj|J8PC`0t`*J z5sv7CL>0Nrl}ii6s7mzujA=L7YN1+sh1;YuHQfpgH0S7VK^z2$WK`h%%|nOJMeyl+ z6fQRNj2GVktrw6&mN!q(zN&T;DZZ`NuPI9D_ir_vE&cdvWl3YR7A%^$R({0nB{ zf~8liLwIz(#s;IM*iRGyo4j@wvqwe+-=LKvKXtV@{3I9DkdS_`=2DL(A*SJ%o30n) zm-i=Ffw($_S(Pm_N(Qo$WnJ{6X#e&@h}UFuQ(!Lo@4LOKTXT4HD<5vDrwu7r-0zaY zDe+t3)@FUCC0@CEiLqfzbWwmFpA#Xz)8 zS;xS4$?xY~HkhI0XSEIa0P`s2L)$%z4w99qoW;s17?HMXc?K{gSMeQkPGlc zMle&6mt!@JafN0;MSx>9{qupiba(Rk4v47GArFVe-^8d449F=|6k?d6?rahd)B|tk zAV?(GMZlOJ3gn_C08ZcD<9omO4F2K5)OSyq_#yj)Z0}dC!7$xn)`uaEVGRYFz$^Fx zusr^%iUt-9zZlL9l3)}rkewe8b90JGS$Qvi85+|LFk{;J<{0AT4tjYSmqZbl!+af8 z*kz9W_r(h9{Xnyu_TaIZxOJ^%NfIOa1KXX?iLT--i!eUMTo_|kfz5nu~`Sw8f&ROK3L4i3Nmq~ur7ukY3 
z(43*TsjJz&$;sI?FvXQ9SY>-H6_OsNmCM<$6C1hZxc~G-lE5Af9G>|DNewTy#L=4Xn1s2R$ z0k67`tJ5mj%|X`JBM9tHe@r@1zO(SK*b3|Rs?@c+O^})@ zFrs?C4?!N&70xKM^}Xxz#i7N+S5D|N&=&fg$mEV=p*6xF#%5e%w@G?Ts(l!yHqG$$ zBkARYVwJ^6vEEy{KN^*f^U?bBc!`Xm-h^X`_ z>p_|*4vy7s+DfrL+y)PqimG%tosW+#;dUXjFisc(SG?E?1ZwIgXEAh>Ark7~c8C+I z^DQ>|ZTeX(o*I^jTASY|Xc@}jqc6fMS^b?(Ak?YeXPR3tUhyOnHt;_!K+04n)&^IY z6)*NR)K~d{;bFftod}e|qOX(sb1Kf0Ohy`3vXGemnuWnGpGauM-HOR0)|)*} z1RZ-pOmNYa)0gNf;;N#N&m*ZY!NA|D?#C&P48n~sNM0n+wIh8fU_rYAbFWntdzQYN z8WtIDE%>LVL|G~-!R5?P0{=hl01~You*%t_eXHi_U;t@)DdCiJ=W(-cyj`58i11T4 zxW%CA#fac|StR8%;fmDU1-R`z%4$G|rrRkC9;c}mO#Mkgy`PIi7O~{}BFsvAiuV#= zN`$1SkMXfaxW0LPgzIaHDp@7;$@QgpW9czmnsD7(r8~gtEonag!Tc9suab0ZehMZz zL8Dt=x0dosWnNZ+nV7>slV8f3%+&*#>vak?4pq|wI_%ke`Oq)7ET2x%Q*?J}$`wAc zFma7wQnCV)pwkw+T<a{f&d4WtLirlX*FVTJ#IT zVk?mqGnhOfx`|M29&@AE>t@PLEjmJ5$V}j^NS^Eq46a-QU2@hJ1C;SM7vGfCj1AGiEM6Ky(>=L8|z4FRq9?_6L$$@^9g8? z>@L?hQgX>by7~$}E$qKe&1@i`lID`n66NadmZG~$pz{j?t7-q0(|UY7+>XRVQvd1$ z9t|#15T=(ATckYv&gS~RX7Vc;f49H`L>k#jR5w`kS zc55u_hiSRyir9m4VsQvsxD~U++#O4r$6`^^cj0o3Z9p(}NrdHQRPZ!TRDq5IC#C}o z$V#*c6`l%2$^{ez5P2fr$DKmyMV_XiC-8qCuKBadQm%}Y-Tn5_k%K7_#)XI=ojnT9 z_aIL11(vspnC7@+l>Cm?V9tRLv22_Y!7TLli>}KpyK<-@&^q;DOpoL=IF=yK_jA^U zh4*oK$%Zlk!e{sd3-^h#E6QlLGdlMvO%P4B`RyGQ^?Kkxc+vqf2Nl^K*wYO5Amb(o zrjt7CHv|bimv{d}$TdPyi7u>^bb3WPBDaThl+^DSE30#As?%jiYs75cWu_v~o3Z3y zao#Qg3H4Fp9V(+aZiYW+w5Vt)h{KA$QP;k11q=tBjXfKp?8+KoWc9qY!fKhI6i&J# z7I^bV*TLsw`${8lVFCK@&P8Zv=tcaSh|~luPxgY()ya}HTaRqLQ;W7utN8xA60`g} z`o2uvBWIrFj{)%!8CDQv=~Lqdw~`1CvGF2$+-`n;OfAkj*XY}%_F0Un9(;;pqD0TR zF?~{t6|3^S!d&7+?{Q6K^&g)xcM5r&{dl;3`M1@B_W^gt!Ena`r@uE7TIa>wnh<5T z1eYSfOekb6aNZ_MFwQhi;Uw}s-7R;xe~hJY*u?tBFQVZVIeP!9eJ4*!dx0JsF~*a7 z4?N3>Z^sqgTKe}m$`shl+Jl}+g3qT(2!HTI-~42RtQ&W74zLG>9|YcPP#bnQ=dA`| z^$PPVc&k1+ix*v@aSJE1#~nDwy%YzcL6g|rNolt)oZ_Xb6vD0VLaaK2zTpuEbIQY| zyXy?vQ?#Y(iYf2k$$nFq9P_!19x*wEfVnnnyVCU1odgJpR}}@jcwAxX#)eT_W!^EU zh3oAQ^1GLd7(C0OzI-L>iD#ugL_U)P=z2U|1LrP5Yxx$8iEsL(-M&v0b>=vNt@?Ds 
zd)v20IX-MT3Wr7bvi~{|8q9fRQ1W(JXuhA%)|k~WdqP#5SNaB&CW8xL2L$~iY4M#9 z3tWs?OTMKe$YI@)KM~&vdIM9CvlP!X#H^so40}t9DNcp~_@H-vW7Xxm1zt-5!Iv9Q zZAQXw6A4nrCIxXK2arGMhO{AaX&GS(soWv03DuJtL_%>)xp%iWDs4!54?|s8jiJ4` z@xCPnnC=|dbPhu!bncL?(Atf|1BwX57GmUOM+tS#k4?`R(Y~OwQF^s_vK<(zEU{5T zCU$Kbm>(%BMt^T%B3Q|%$hyVSn$WUQv(f1&x5!oLv1F_+$Pu7MZiThEJ-1!64HU%= zi4Q`+KJIcFD>NW&z2VZ_2S>CH-Q4kp%N(?s{-z}wLXR_qrdkB3YBawdXp&!5fhr~ zj8^e8ieF&($ZgSuJGt@f6MZZ}+CyD+9*MN+^Klub4hVEV{CDOH?0aMMwsFk~y4W;% z1ws-0Ge7Vgl0c?oF~LC}qz{Gos0Z#rHho}-$?X8(*%I(ADjc20#JDfa#*w~Hr;4K! zUDjbVLAqYg-Q|7GhZf{H1r3~*>Xk*R%xcn+*H`P@;8Y}c9`Vb~2+8rj?xz)epVbM9 zfBDMZn2Rz))NyzriSCxlWw*E1^TPHLO~RdSV321|caSu`6lGRHqh0`?S-rwVzP9-I zZgKUnJ8j3a#6?4OWG7hy9_c|QP&J`8Sf0A)cDDH97@8Iuvr#B$uw(*$coLPB|Q@?CG;2;Yc7cbf0nS-&!VcJfiSzeNbSY^lox2>vxq!HzJc0ED7kR#$pA%wjUNqot zJqjI&hRDGd!4aeI6(cQtX(5Q_d6=GJMv2`34xu3R(nD8DUCNv2vpffAY49-hK!ZvB z^^n+O36BAf{ZN;6$)%>C6aL7;FIIQEer%)w+oO_V9X%>g0cblIG5v|29C2Q4t` z#DQLp_^x`T7lD8NA%QF&DdVn=SquL&2$G;4>a*o^+O=`W))4VAifFmyZydu;i6#|O z5L`ty`Z&C`(Ye*Y7cP(A@a~^x``t|rx2#i+TAvDAh6APSLx;37L<#vYYVglwo+5oD z4hfq=s1|oi1Qs3+(-hC?aOycXJ%P(d(I06R>op3S=S+Jm4rg|{)q1Z*&oHL zpMF+xe^lRRW0QyV5nMCNDF}O6Nf28T&ka_CR&sZT4SOV>@33;{y{!;07!-w#j_##% z^+o(+;cOH2_UUwdPG7~Lv_W;$rzQ!H; z1TVI)n#uVSJrmEH>VsZ|w}WrLIk^cyQl};9hxOb;uf>jX^C2N=`#?QD&TnU>)a^** zlVTqFT38Xz!4hP&27m#Nu;MU{mtdu6=Hez+SR5hi7qOO$qiPaaBj_g3r%z2-%;q{w zBnR%FbKdd#=n4nJDGr@bve0xFd6Z0?^0N{xw@k7=B14sCoSXQ%(LeXi3<5jxl%e88 zT_n?Sfv=61HT{8a3kl>)1-N%{>qJYLx7x0MeM@ijU=H!4UfTm(VBOHy{IY@5>?d@% zqRa9&nA=+#6ABe6v*ZohwS%(NI3!{GizcG;K^Bd<3lPDb z!TmRX*kUkboSnVskFdq|Q%ciM;UuG30y==Lv5nyGtOkf}H8IOMiVUlNAD+2xzIpyV zAr%hXw9Jv7N%?B71Wi$*20&mlB?cP*G|rnCvS&;{WfP3U6+;%b#wA#kYHSXya?>u2 zz~(7As#a870>=Z&GFGtVbhu5KPm>ueg3@#&RSW~c@b~lNP1y#(V{CX5G|VyR-NdnP z1$ej>AFQ1U`@Os3gr{C2<66zj93;%#<2`of1SWX#PQ{6wsuzKw*yN?L710kq zYk`wW@bWGFQwTo&)*;Rg`f#hR++RS#^Fxa8=*Ol}E^gS(J-ieh(IoygFQ%=y{X~BW z60VsDDjxkE8HRps5J%v_825pNKRAxcQFW@?q@IUMb#@hae+SQncX?5!GbS@Nr*4#Z 
zZVdmYBZ!dO1m!JDTt)s8d*v}r4^4EKj#v@tT6{QEn7{&l^g)27pXAkNWkTv5{}$o` z9g!J;y{7hSEh#%KWIko%#9}N?{XWlEuu>}YDxREuTh(IxO3o^_ zD-_Ww0P#|l*`y)JY{WHGvGm$}1fD<~ta}Rn4rHma4`=et`URQlef){#E+cLLIBFGR zx1;*xh$M5NVTx8yi)5@H9&wOmn~787VZ$91(im^jRRnuN56DIET)lw#@rgXHLZ4Q% z+}DEEK8tmQPg>DvMGUcjcY)#fJ?%YrK2DuM#O0wPDVO~&og>Ci%*)Yzu(G`qN%-c) zGxH#p)F#UM*>Fp@nzB7ro#3yG-Jf%qp&%&xH&?5hIsiP|C>(kHL?Fu+e zAj?R1lSzXZhe$8Y@Is)IVPya}NmBtS*5}CS{k)%Cm_p?OY?xURrGI55Hp)d~VMJ$s z0Icgr)*LNz-_EUWWANFZO@xvXM6Z}2V3|94Ve7b(cSI4L_lC57wBr@%0q1m6v%B5s zVpe(t3(i#X3#MbD6u~rNz+qt~z=1~e`Nh}CTJ;f`RS;E=AE0@dd6}9SxG8Id^^^)3 z6G(;1Yx;7OfnaF*aB>|;cPmU;cwJp6rXsw~)$0onVm1V=W`x`+OJ@K%9%cgRUkDxS zwwMemJ81Q*(U?MzcK|@;I|TY2^->>${D9xL%=tRogRU`p7e;Od496oD--4N27!z7i z9wxp+uIo^y<=nC-0t%CMyyd+zK-B*AwKnWJd^_Xp9=IoXy~6jjP(;1r{lXUJQLN9# z$K%iYsMTwM^Jc(R%k=rgDc%$SpDQdRS<9L_=HE3@B_R9+ZIgN;GgBo*$yvq; z;+`r`*iY`nNW&;-!(Cb&p$-q1xXJ&9CJ$fZJeU3Z=N);FsW2-xOVe!=UgUu6KAtd_ zI1hd~y9s1#cd;}vjpy;B!9h79@iDCJ3!pk31j?UvKy{qnnJ$ja51jSbX3_WDDdv7@ z&iUQ-#{cYWw%+E%)_WzKI<2+@Opnt!CS)8TN{kqr@|#ARb3dzFc=ZxoHidpok7$gT z@oHGE`#MQO@Gz^m%mU8R;~0h27#>vJtp6|&{Bez8%cEw`y&b~%-XMps@5?v8D-I^>^$0##l-(aLMvA3SJnanHDzWS($YCN< z%zcRQ>FIO&rmu6xeTqdDj0Nzr4+GyKDxy@J8vqMZ?;urED@NQv5ZjFflUM)qFQ;$C zU~6#?XrGvfI8WfeIG-xCzi|=$9g&i7xy*KfJVSQC*D#sa@BMryTDtb~S$lcG7_Bgo zYxlr_9?J9cFkj1qL6Lw&R~hR8sArrv;ga0f6g4z7J^j@7&l9+ zgf;`y7C42qx^?Drpw0?QoMDO(Fff*Egx|ZPcTN^Lh<4m%tdazI{p!>)}(8-c0 zFc<;tq1Gee&Nk7t-4GAr32aW0npR)0Rypp@_)06w3I-Kl=k1fih>Xf z8by&1S#F(u{1E<166j{&jMA9m*j>uSGOkMf$~Tv@aEZ`qgUHuj$Ex#<7K#-Wn+rTR zjd0QO5jvEZSM6`>E%f?n4SA~&Ewhke_at?90?`4&Za=(W%>B!VE8AkI`CaBkh6?N0OyV$6h{ zX%K8|O5;oU`+hu_xn=VQv_^bOw(EZ!E~Kj;>DR-rBA~VsV&EJm3c`-{;{z$hNn}xa z+~W#jg&Frqo$e&opGng|G|i)1-*CdR?ecUY-33Wq`1uaoKUuj)|78{y_yCu<6fH(5 zF{N(E7O&#xxPn`rgLe4Z>Qd-u3 zn|>K>gB#`%aoA$sKjoiR82Yt>5fLdwAu8@sL~NpvMDn^EO#XlBaz7VGbMve!*Qt=* zLJ2B_;1SbNY6jHplHk95aQJh%qvJMC)P_p8W497R@OL6B5p_-#EynCfkqqSAZ^k>= z@M#DhyHIAXDXdycyb1I_M#bTW-@TkTKbe$h-Ndhq{>{Ts*Q%HM5?RJ7u6dozG&5RW 
z7e_Ca-^cn;88Eekz6P~sRt;BCE72xVP(qry>cGi_E|L99uv;DR$#znrP>d#px znjREf+5q3GC)qcABk^2?(@&bs<2NY&GZQ{;&HwHeN%gv4LNKt8PvnB zlVFO-35QuYJr}ocEP9r_;Fw^xG8?yNpOCC(tg zxRx{(8NGi}(5E3KwEyC~+Qg@0U59y`=2A~{Jpa&xUIC(T3|5ZvE7{Wvwo3;TB0a&P zsUcEzbBA*jzt2Bc8BV=Jk}kiA-YN!s>YR_s71Os_C}WEp{Yh{zN7%&Nyq*D|UI91W zEQ%x%5eZ9d^F1f&-Pp_3O1R}X_EG1}K#uHRT_mMCjLxxNc-9yJ2o3-?+-!eQ^6x21 zW6$ZNh{Dn(Ph{=co~&o|y2dsiO(L15rQXl15@P87iIdc_RuW})bBPI@vPt9A>dtH- z!~E)Y+BbXgZ4tN;h9s)$y(Px9?#yr4I9LJO+bH69Rg`tj4bKF5J`V!DE% z(Uk%Vvt>>54)>zi$sf65aIz2uBOv*fMdn0Q3tD2nk6X)LIaMW+Vr+f^zCUE1S=f@! zVMJf9TGt044ZV+!iuacJzh~STkkYo9C}@j1_*pY6^}f2U|5LUcS&v#eckj}uVWObY zEjGj@oqm-;!E`w@qX~jkz}NP;Ku2jH=ie6^A?Jl2T(^2v%Eu8z*7E97E!FZ4s94|d zwVkF2J(+-yNE7aD@b5S3>#}nuU8s6B>3}Gx+nff6QQb#kG5K=r(1@{fX5J(7lY=U5 zHpN(l?{-KWbR_B7Ts!cl$mGJURb$lavMOuZC7cP(u@ln#r=S;1m}M}{>UdhgJ|yqT zg3BH{@W!D;j3bMqYEFC1%g4YD&g8OLcax$&S5juK&LCqMCS<>0FiXsx{TTh%no|rq z&;5~fd9pPO(5xi~zmGSD&rIwR2XbZ2J8vz@3Io}=Gl50#5(=lIe~uEkFW(c;K|K!x zXL;K%ESmuRzJarJ!&IaJ)Q!k?&Rf9aTT2XSs7#T%1^ zvCwf|N^L3`!_fo=sgOFh~wf=SdEim*%nn_TfN%iiCCE2Z~5a44D5qIeS$~ zh?Dx_4{cZee#^Le-2UTcGHvMIdW0MiywlVkAcv0uN|kG6pkQotrjp zbD6e!5iKNp3P7=2o9Nptaoa(>43nN{%88Welpan^>3#5y}J_ zS@q!10;~d|rxz~v3uVchFpE&@yIagb^<>ZP(%W+DwW##=eq8oOR_In&UCKO+Pd#}> z+_Dx&27gZwK3rw^&Kg8f-}64UUZx(C{p;s>YV0woT;YOWZ24}5;TuFwWc53uA_x{A z_Ej8@ggHaEnymu8b$jC#zgO^Ki5@fN))>xaz|tiURT;V{Ou8tg&eC@u9rZKK63i4I z2$cBl%7~3@K;!1L{$3vhHqev~FK)Yul21=m>hq0$d(uR2^`h$S7JBT}f-YADb%@tr z0E)5XvKYADcXW)C>^8xlE4>-0T1R=?=VC&Xp7^nN8+M1-j^&m&(akGa@PD3@O16rN z7b-ZfqvK^%eY*GcDDf69WJQ;T0UgZB9b#e?aMz$V|2B!-ndbW^lNWfbk;}}Ecg5)p z9fAEY!{IHO$yy9>J#I@)TMmu?RUm)tIYIDn?&J4^2)fdtjVLQ7jvf#hA*>yCz68^# zliU=ek!287e^hNyQ1NKMmTt`t&=sG&HvlHx_cW!k`7w(v4X^&2=)L^+(_9sAWGz;e z-e!U14~^eaZg-*p#3~uk=`IyyS-vQ@JjI5(H1XPjD_@(>uP>N1P);b@)0A@cYf$%8 z!L}JOA=y&Q45f!0GFo^P8e0WyEHC-hftvnttcKHgVC#v+`#OPY!#y^44t4qp?V{VS zMVyQ(FpUBFO1J2}dY;4^zd`QCU*3vvH8S(+$S8%;2gymxfqzYkVvKtxPgi64xL2a; z=PMo|x8-c7?)K51?Eo4UFCjJW7lI9t4nOg%z{qK-wdu_yJZA;uDJP7+d~f>S55)WT 
z1Lav0nIoW#bLhczrD%(afv~qq1Su0Q$gPRdB;Bh|mJ6DI9*+aKu_g#E6@TseKzIkR zB8)tDP((2%g0^J&MjK#TnfCN}bJ%I7Dmy}Nw#-lm@F=ay@hzW+3GRy(_khq0-IZ>^ z&gP$VF-KZO$l<1~+@huh$lLd}=oUEi7I!~5khW#$w?YWt_I2rk`tAAs*TJ%xD$=no z^45tY!Sg8{e^9?=N?BfX`vt7Uiw=Q4fOq%xT{FAg_?ojh@R6Iz?werLsGWv!pT?a?ST0!{Ay>)&T3;HXpBaNwqT9LtBT*#su)x-OFEusl z0gv%4?RH%m1v-k1+kmP~`|ZJc2Or>mZ|?2^&%zt66#&Ng=h1`G0_9H}(Y}vf=5AIm zITbFI9>aFjiv$S9noRP{>)h3gf}-cua4YJO8ecK;kXs zCNa&}M0A*^ET?gnm6k*Rh~Lf`o`jTUG41P$1{vixQQ9$vW5Xtc>W`X#R_lax=~Xo( z{l1{{ZJ`IA`0ompfWu_8Y$;LqKP>?9+h5#^T+Oo}_xqv6mm(x-K~!49oZNSy9Gzql zSeGp2ZmLuoGfr4d7684Ndjl$W^q@}Q&CGptzh{R2J%@A7%k3tL(gVULpnwzpEj2vK zf{;GQ;yXym{R=vKBcZ5=b>C>-aXEvX#3Q{b-SQO1+c(sk81=)wte8Nz_pu#W7NHgQ z?AwQj?!J@!3Xl?de)FJBrMHIf-Xtdoh!2%Z#;-&R4CR0vv5)q3G*P(v**Q@dn^Pc^ z6Ln%cs*4Xsf8fQTz{u>U3P+4Z0xackZJye3b|*3=ggXvvdU;;amAG6}0!6ZMJa_{> z|G?R07wuTnWI>+Td;T0kA0{A6ICh`De(B$wCJ74)&h+lk)@REyV8@a0^jo_LW55q) zVlo$EPb+YYcxoxH*xn5w3xZ@z`z8SLoJPEbel}fwvh^$s9kzYDk)Q3+9~r#oc3sN1 z^ymS?%D*&%T^4xNIF^<9)w3Q8NDOAj)Y;$tK-LL0sWlJYbZR*nlk``YdZC{*yzF-p zFX0?#9JN=6F7v_gTdIIK!wslSZ$0*WoGQdI`Y8&&0P#?(ME;#ZPN)*%oVI(_gc7P{ zA(m-uC2g1ZKnQEQ45eXnwJ{K%ZZeNPyD-oe1hRnJnqZyw;X%46Ao_OXgD7b;aExje zMR8Zs^xk;`n^`5r&^0jgKTF>%U`|_-#-fC>uoeT5UXEizt0KCkhC4#cvqo}+?^hZG zT%XOh^XUwdmyUrTkRn`;CM~T_!&nn+&4vJ3|7J9}!gFSC{vAiUyiuEvCoPBy&GlN1 zZhmFdXtJBoaJB#isS3K|oUaBqP(OrZ^K>!~6xF2vPhyTfQ1{=(l@VaiANu3sAjHMz zc_`E#(p9kC&nTut&bS@twJfx*{LC{M{P*(|;fQ~4<4G4w%sCa`q9Um@E12NuO+_z@ zkN*UTSDc<2vL+6?HPuV{jz^KVESN#c`@gUe5y+2%^iaZ2`O65*GTw zwLW7X01kgLt&N~p6d?ktWq1WN$V2J+51_C(54ONvr zr;9Vlpg?n2BYdIk=63_2`KTnq$^!uKf26f;=whY8ws@l<#Z1Mcn*{jq_iz4Ou5E4Z zsS^eBBMt!*ewe4Gwa_6G*mf?f)w7&H>czC?HVBGsv$J5#G&d&USOnW@8-M^gUY6XS zNOI6e1$k12BuhD!nM-*5Fvqp$m;2~WgDDVp%l!rrn2QPe(GB+&A@xIr-*l)x0AMD( z;$?}8;uk1Yvc{w7hx=MTEd<@9Za+zX0Z*;!fy*Zfwd-*uh6d;dLmVb z1&)CtE11XU&|sy;TLtf+iPh^%FYc z<;di_E3kNBbYb=~q2K?Hc7k#{U>yUa4`)u_1{;~?>qDg||Qi`>i7kOm8$+Ube z75}ab_UKVtniRFy5g;)MWm+99wc)S~(LT8I>P}(U0x)3jHDj<0HZ;ts1PL$!_JZHd 
zyj?|OVNx*RN!TGEG==+o9n{yUHSd~hH~iVg48?(%8@ROs`R*~uBqX@JY3|Anw*^It%)j6J5%Q>H=^gU+$ZI|{(S)uTito(J_Ju?lk^Y2Bq_fcoeJ{w3 zy>+z<;*}odQGo8GZNH^WZ#Vldo~F_pc`W(sv4C%oszVt*^?6it`7?Hnj0VS001#av`QfG)&!5{%uJx7w&AcIC*Ml3Poh5_SHDC!8`Z4{rWNuV*xoTKA zB+5!TB2IK~Oy_MBf^V$^LK)Egk{-#c7f610=_nNiS*;I`Dj$uKXGK~5!biZ^wOX)RyB@4p(&p}Rd`QEYW8!%ay_18`>v8z5msdt==ZD|BjQpK;=js$7@c&2E zRR&b`JZ-w;lF}`uq;yM*fOJWx0@BiP>5vX-X$b)V6{Wioq(M3arAs>Ay@>vQ?+3nd z&pEq0JM+vl71faxz!fxA{bEA7ynYFTH=r!u(7lUEM|--_r!JaJ;U4R{2YHPZXn-Jc zDcZ?>`mDc>>i)`>NC@;9$!vVVV++t!&e~xK&LL_PIcOm_jafA}X!YVjJnPvTvTpbc zKEhB^o@cRF7XUY^5#dAcchVF{NQ0L0O2vdeZO=rxky@jn@7xui@p@dZUY@-ZI?W8s z!ijVBKg1jqIWNsH{9L}>ma#s2WR^qDO|i3LYkt2mll!iL1%1f~z(dc$4;UebgC+{0z(#q?`&+Hb{ABYlA7?2;At(`jvyplOHv-!m$X%&$V<1$ zw_yjRzT+OK^3Gp8kY(+fJJE~DBZn{KWYlISihA_Cd6CT7CY|pEPeybi3M6Jc7t=rU z(W5H!J=0WizedU`Cs6AWh2UTb=*Lo}p<07xE$8 zAX^Cqh8ApyXj=VDU8<=fD3Dg3UEu>17G}o{5Altr0!($`<>@&MZS6?!!c5{S`2y=I zmMN_DjDg22cI*SUETmK5hwsUrobE@594rz}8ltH%1&R*L4u4k7lKc&Z0iiz|FM#B* zo0p&G$(u6*Y|$%Vx9^X=A(umTmdg_JSpi*G$1?mW`nhC9PaVUXv{H)6LlupXiPKIpM5VkDCpyz zkY1$JNgkGhswfi*f=NG#Ainy1fX4nxAVLc^c2vPZhxwDJT!_RoJP9JXGmN>%VR96r zMTRQXvEFZvAu=8Xy-7cYiP`Sc`aBDz&;F3Gmf~#_)Q&4s0)?en`pjGOmxIRnntb@^ zaDoy;Y?y&qXolypw6S>*=uguDE9}V%$GrO8i-R-!wV!SWvS1?8sE6aYuGMpY{C!*X zj_-O75D5!Veet=<^Ec}(wb=pZV4W;RG2rKox)uNpq7|I?H~}w%YEt*sl1RcD+2ly_ zrLW1ffciYbvfR*Qdq71sKFIZK>vIF6%{E$J9gihCOd*N18-+@T=u1~d9KX&IzdTIX zDY3PtB>|qSBq)RpBQCi?@;NfiMC+1MPB24%LQM47z|1iCV{1=fT7btLi&vj^>7iyJ zf`n@BL%A}N0vBz#;-&3Sex&#%co5rZ&;Ys3>^8IC<(GDGlMIF+VJt`~;tpb4la*52 zP1n9`%(5lq_7Ny=a#1G3#RPFv+Vm6DlzbM?KOF_=WG z44_x_#HSOGtY5*y)P-8((+5LC{$a2JMrq^0i`@I|EAflXMQH%WdS2+CuwinhQxZA` z%{l43GDMnG#H;R+jZh-WN68s0H(~fNWbxo-3-TSE6Qe_%g5UQCAk7Po9(gdE0c&l+ zYN#*8C3iwh`;rLEbxn-MBlGJF<8|l&yvlcgZ?<_^)$363HWTGE*ub37QX$9tsCGdp z{p>bq?GOazUucUmIy=q&h+~xxzmuVgOTH0tn7ywAKZJO5C!0ZY(bMcd@JB)l@Zw9rvT_k!OH)o7S2(SX8469@oeN%s1 zj0l~x<=|@&e^cj|EXU*S3;_xAr!o4V=*wD1mv;;eTZ1v#V!75~h>ld)GSd5K%BomQ 
zsYo95lK#lb+m4bWd@%sy_JUC1o!;RO^ICzdvtOO$V+(sFPcw$&1V4KnZzxL6fF=FnB8Tid^7=# z_%1x*Aht?Aju_DsI~^yQeoxUtIrkL=^)iH>d90b z@T}MdQA0uwt^4=do@epG3=r_a5MZb`;9++WraOEO(+A%+lvDFf9a2yWw6km%I0F2- z=TlWl8NQwdziaiJE`E|~@RdOBfMJv4Gm%A3ll`@>-=7!uz;be7G%&0Txt!~J4t_PGz~6qGGwBj3}6DLpnXB2mDD~r z!rgmg^9|SpkVOH!wO%ZulH~+)-Q5cygw(&Uar@m*BMF0M(uMLH}Gx_A{WsbtCtTbI;VM`V_pB0 z1>E4wAxL)|T7}!W%Zz|-on_SO9OHYTgLtE z((w1WbFGmcm-sJ_K7K6PE7bR0p@pxt&~ZP<<%Vg2krFqw7UFWJq|!+X2wDYQee#;5e>Su@(MauuQ zKnHCNGXujdYFFcw+ta5p3myZ;jyEW6={d6s&=uzD#K>+Wo3e~zjp{4f6-)q!CSq&?gD5@gT)6jR7@_UT; zsL>eN4wNe3A9cf;h34X2C~~El8A;VS!o(jDCjM~`+aZjlffQOnnoS@_zH24%Axr3R zrQYxL;abgm*k5^vx#|%W1#r?eu*^oI2suswM;fDAfUgimS7>m4G+eEezUw+%4)e80 z=_DN4K{#E1ZV?Q}KzFXNA&Jpo?P$(V!D_$bL3JuB{?*ap0+&RfPJ@nFiAgbqCV^2& zXf+6Q2GD%z2WG&pYL)!ni$*>b{y+Dgl8NF0F6Tvn<)y<~O4nuoZRqb)Tq4#0Mtt zK3BumMc#GyQQK^U2|g3dlZcGuV<2Xg6DgZlPNho#P0q%mNzBXRu5krnKOe%G#j9{6_Kx_S zJd|2L`@K;}#M5xVrc7CA4GQ=k^YFHIfcq3|ZqpUo48mWm6c4Nh5`T>p+Hg^0w0{YK z*J}dB;lD@KlQu1IEU-8aqkf8t^=~}|5*y2fu-Sl)&Zrg~(JM_+dHsgp{!Iv~7rC=) zj%2WlAzgc#iyZS&_e4})zJ869;|7IEqioFy$Ps=1(?v}!zC1nl(`N9c)NtifSU0g= z*<9pF+(7aJ5t8p2cJ^;?9IMJ0VClI8LW1r(cmryJ?Crw5PHE5Gz~MarNcyEn-g0$* zc>D!zy;IxJLxw+}WWZL#!0bc#+Uq40FgnpJ#nUqRCTjfL;zd$Orfg*%y_rO)2cSru1wfAx@7V%{BsEDk!@L-sd*L`RpCyunOL8R6{eb2? zPYKO)W4tW5h+11N>fNb!5&pUOd3n6Mw=DdhPD9XrN$?SYt(THs_lCUOkzqr<7u zkG5k9tQ5aX(}uZt0}D$xt7Sdkd~x@cr)C&8!>Y%iuR#?YHspCOXF zn62*g zxpd#QA88g>k{xn!_gc$d5wfxJmS|7VY4=-H%1Us%1tNQ}eYT%x-*|L-*hNjo{?!)- zqNZW)?_6WO*}3Qg`PD!ApbfSuBLRi$3WL(>nC3SEW~d6F{0NqEVIgS@7>CPiMl3|KH45|<9>i%H6u0sPiK0sy*Dpmku&UfvJeN!U&Cpy_{NXc zAkor0if2ohx((JzZ<{6H{Le<_N!L1{=Y?OifyP^Tn{GF(asZ=-sn za{@F1`pD=d_@Jw$i-$x0BQ0q~2xu2qXXp01pk64xhH#?Q9o6i;B$^C;2ck+FkM)cW z=yP1GK;k?(nP3j|X`C$dqACbGZ=JqBttPT2>Uvqig#AFM&_N>SSvz{vK~NSm^&{+u zWce52;U#Cw*QIw>&|Nz$>0Z7+%sMy4)~|5L*_>=NaNV9MR`A=`u0oaF3dfEoo|5my zi}1ojh7J?7Rw_Zdlbo((&%L-WU0! 
z%FhqLa-^ZsC$MZ$H(!wUnI_iK6c-wiQC5G>YdxMgvN5h1*q8Qfa`7h9V+-^{i_4)y zv%zmtM*jjM*Orl@h@kBP^7*<{(VT|Kz2e3=a=S zcz&lC<+;MjK&Mi=`6MtNAU*8x>Vn>5c%53Bjt>pFQ9jr|7uNE#8aH~>`1{L0Isy*R zEwXTj#zy$>=@jZ}oY&UhLa}ilm1Y*kAyWrqE68c^+nU3?a`FtFK5_!v&FD36_m52{ z`;VtCj_3D*AJqraGE?+46Qsq^2vd)~5>+2Bzt3O&6XP zgW;p>zyy5bTnHy%dP_HukrSMUs)=eXH|jZNGa9=-R`Q%z*_V1J>SY+G@c3N~u2Oa7 zAvD!Gryso<5wel?`)Oc|?-NRkAhiQ~jfedOf9St3EhGoooHMD*M(gwshduC=A zqR3!@=KLUSEcpVsTl5iUy)g{*gZaaC$7Fepp3*1WBabtpK&-BlP=}VguAM z-1}9UzGinha-ZUCo<5G7nbJ`Qk4vh_&Xr2n3#f@a)BCg*0UG$$A8hmb8M(Qo!m zERPhZdvfYLe_KHOXG-zk6QWF zeNT3dBz{+&jdU4jEXvI+ur(U{5vXDu-9Y=LQ!kBP^HBzKwp2%$7=E|y;4#{7Sn=GO zInWPoy)!N<738%?lJqekzt^a#(syq`USIrl6EF;g3n7su>x}_GLN6&A2Kwj;5Jzl^ zxQ=s~#Q}&I(`nxeM9B9J#!xG_)y+)i>hGQ)de`fm7+ES@)C_P7qu0?zo(=v1(U~2JzTp@(zAg%ah{S<6yrZ_caobBpf_Uv0g2kXBr9niVpEpIBJ% zq+q5HR59j$lGiHT5+e#v+9Sqa=IbbyoDOp0yIHzr3ui)~tuSlVe}VaGuS33`S>MM0 z%z^#J;?5p&5Q-FyoE3Cll?ScX)q~%8oOfBm-=zxK0i6=ek+FwKMffZruOKxVMIG77 zkH^s7Xv|6t!E&F`3~xBO*awhi0jZwQfEY+E;QmVz=o&(X!c2p`4G3SeJT1@AcUcS; zTjnBGUA`*xInK2G{zcXKsp&^G0on(=xXx>^3?y@UWpWHG7-I!+q#;E9)ao=9d;=(MvY(n^u#`d!)19R!?vw45lgFkKPi=#*aq%9F zPz*n6Sh{+UsE?A0VP{2+_X z#%g?%(dC~aMI)%Ic64>mX%|W{SE;nJYtj&$I9tcyu|*wU?_7BCz^|NvQt`EYuXe(V zo={#6I%lR0~v-d@%CS+079cM(qF;E;J znT$c~fx9xC5zL(DsCB|`^{r-?e{~VySnUI1K9OatuR}7QhA)sYgA z$^0x`QN?1k?C*oTlHTGp%9H4#4fzUf3u`EM4w>1H#2pA?F4MOk3}|;yymuNMfIp+E6-;d z%|fxx7Z9gDK2OlfIU*L6$V6f%^nsGllp<;3n6ssG!fbb}GoHjlgsEvMCD&Da-V+|3T?64e;MqQyHIH$53jiT|h`?`OmodOgR0x;j88xSp?k>K4=e z_7h$3D+SZRbcP&#nMbxx{__3-p0Ci5itN5p^a}YOtR9PB#Tq<%WRqB*^EVfumiouB zfM-hjY0u(5N3Ft$9^C5BxK!3*!CDZb`Xjmf{qStbrb8WzH+K7RGD4oYvq1rve3Q zLdluEnoXgwpAYhL`%vqDy>D6x{WYT5Apb=mhy{riDW9I?K`&~Ei6@}B-l>Q5j;xgD zHKvM*n6KTV&qvJ9t6y;YUbyp8p$E5Bm?JK~u$5LB$rkh1agHFDmy49U zT9ss}%F9Pz+Cuhb<#VJc8J?<=@mo@o9TM=H6X7iq2OXl#oXQ>HB_E;jy2+R{hjJ3! 
z@|QtyD|CV;=h z${<8HSgf02)=pYwjY?gnw_c|22~+^b_4cJUpWS43l0we|+3PQ;Ve;3!ZL`8CY(7wp z70=SU%w*}nzYlr_A{m;L%j39S)<6{kqQ@0Uqa!SBFKae_e~|srl;7d?ban6IR{D?K z@v@R<(u>R+*RhFtHRL|WGHIFd1$T2K@3LHszKeH|oPyeu(%~<~c}x^2(2bJC@69)O zB?ZlZS;J($UmK~09;lE#y>i87CBbmCz?yfJnu9 zJE$Z?f(*n^QUa+ku@oZp_-ILLY?WnWsLD`Qb{E@XdY+*14CIAJ;8ua`6-#R#Q;~f? zKGsTNt(Lb+m@TEc^q^D3Sbz{%oN(3i={jxf-H(!#T(PiX{ktS}Zad}q?}Fi1v+s1< zjX}Cm5ZD!`f|~?+F+6Ht-#1`=8WVE&0sk^wPmv%~OcVa_JHi$F{wKbASi0#>*1s?N*<^_KA}jkzoK z)4EPXENMT^9h~2%CpQex9ozF4dWolx2WJrUmXThpxRsZDIzHCjF7j!CX$9B;?!qDx zINlVKVaAF}l$U4e_B^-~?coHP9Rcf|KSK5-13weIq+cYyr~}D>Y9;26G|P9rL<&H% z1O>_GxZcyBeEQ|ApE3Ct8&4=%GyS?pzW6qP9`m}+Joro9#}`r0wH;#|2oj?-a>`XI zbM?RH2D7@A-bA1sM9_*#JkMP-ods3i-q-smFSXJ=h5V~aza!&i{g^@n@^elGM`Erg zUhgdHVnxy9%72XVkm9|X;W!XJppRzq%}snri&z}a8d5USpW2_P_36<44dgr;79t>r zFXQz28JoX3ecqJS6S}hd7Td{v#8dF2X)1+YrG{GdRhe8in|4QU{7&v;Ad)hV>PrJx zSoImcz5l*;vn{Y-O^PhI$SG8fn=j{BsA!kaw&;-pOZrLbR>%+DclVFghri|G?&*<1 zlLbjiW8b7NyWr7Di!)+K)=36}UO96xEXm3sRd!bbss_7Y;Qlqq@j3=MZ17XbOEMnY zqQ!{Hx9k0L38wRW;9bV_3xq7}9f;SVn^HbXaimTzUqL&lwGuF-+8XTuxR?R6QOiQq zD|eF$(9;>S*cRPalG@RiF{ZvC@DWG5=|iTIVbEj-jY4@GV}zy7wKqrHFCj5gIQrtk z>NCY$g%*;`a*lc2N}ycE$d$e^m+Q&3&v5`@RP8l0oueuA!F7+=2It1 zQICkQO>l$8LfAjH1!LDPVX|M~j^U!z@+St3WY5j`EkJDr74A#Eb#f4ojk;LTzlz$( z9&UNY$N;$uF}E&OE64}^LQ2N{HGcU1krN08Qc6=wVM@#$4nsPCIAh0FJQWqL%Ofcw$fEAsR_W0Y5t75$V_Hb+fqt;kT0-*5 z*Ax8jNT$|+TTX)mq48r=kR5)!Im2G%7Q=8|t*7R?6$_@DCq7f0`=YFNb*ZgZeZj^u zjiAkQmY3k5hTSt!e!w+WybLE(hx|w3gXc&A2jU~DG@ip^J4qe0Ei(j4whK7rG5t`G z>ZFvAzg#!xZ91S+kPVNz=t5GUx3JJopm{0zg>uDg5C1NM(OkX4=ALMzUbpmU5=kF2 z24P~*4Eh=6RnzljS!`tK(}Th7S#J9|k-NAC?A&KxecXwpwi~%w+ZU+dwXslsC>JVc z9@Yj$N_h)8@u-dt)|W7HtUzBjrCMkug+pOOOrpZY<}-a?W$cN~LFn1Z_P4U<<;F zMj=AeV{@IDNI7jj|MC4@k6FD(r#>~^KUzJsp;e4X*jw0y@ z=B_yzU;ApxaLeVYSiO-h!{PcB5o{5HU{3GMt6*PvWQim9!vGX%X7x`0uVZO^jrE~ii!L;iK z)T;{evF+ZcF57%}&5C5l24K7$?Zbryc)`Q2b^7+D#P>gTqhG}DcPTvb4-#>gU^uPl zZuFCh$9>f6e<{cM1hx1FtG403;WtUAEPZwSr~2$Q5yY6osarfZo&k&mw1wFn+eme3 
zuW;3f+>nUk=w}S=pb>L#$%TstQA55ljY6BsOp5wrp}eW!%RPeSsbY3oigaFk-+U6q zK$J$#7L~)E0a%S+h{Q+Qg(m{CPp?bGZys+>3NM+h$|)qX+}Pk0+dB-2PH56-RrNLU z4RXM8?9)7Dcw{@G>!e*m=RuyyRD(f;vq!Xb`0vYHU~aNYl-%eM3f&M96HN?BumKIJ z!6cQYrXUR?tknyvyf0{3AlH`DHl*)QCV8zayv4(&CzXSUjSEuYdNX$YHfK>&^yQK` zcKv3(gq7Y5t;F&43NGBAdL3w+(So8lfO)?nCipHJ`Q+!mZG^g*uIamEuZ_j2I-xCm z15-}b79dJ*ZV|<}CNj&f+30)|O{8>T4RQTeCt5 zs`drrFS`$ITxis;o5a5Oc(Q6m>^y|WP=O*4X2T&f8cIY()7XlB)btd|b6nFZav|%E zM5vw0#*trdUSXg>JW&KF^>a2+eyv(r7I{^zG}li?7fM{Go^ta*gs1m}7@ zMHY(E34dni;RvjUZoe+EiJ(k96==MC(qu@z{PbN+#>I)2*rk<_aq)+{UF6k~Zcjw3 zbfd&VaoE@4mU@%!RoYH*A6yq$STIk->^l+ImX?&5GMZ@_SWw(YW|sK##R5Y9ZSY-< zhUVL~$W?Wi)*Ek&=znWa%8lCHKkL0Vv0T=uIqGk+8b%Wp99)H0f9**PqDtnRpEtMJb>lO$*4SMla)saqDyB}yhlDs&~d32u6o=qNcV0t)J%&_`v_YLmHcJ0 z#^tw9RKMRN1_m-N2=E9z3kV37QB_LcQC7Hox)5+ssG zza_Ab+8UOLyUSgXo`_=c3JE=zI9eZ3J#jM%WPT6T6bqa!W>QLHdVBMvU+m4$Wfvpb z=$IRk|0hd!@Vr7;$Bn{yjNEP)WF!<&3?GS-WwABthHv-~WrU;WHmZle?WG8bp3KyE zF#fqQ62xGWQi5M~(%#;J+o#E~?CPaPbBJM%jl4``a=$DL@3j`NDi3Th=2F|afQyxH zyn1zlb^8n)Oz@1hSAE5*e+B1eGvb=#QlqazymIt#C6Ngdelc6}()o5w8xOt-Z9jXg zQx|uK^5&#g8o+)lU!{P#@n8b+U=3c7MjiufK%yG9f|uy?(z3K4b`FW za)u=T5j4VViET`IbU_iMsWJclCyi0NNiQJ0*b?_qYL+cVzMkpej@yZ7afxN!?EJnA z6u!$__xV7=e-Gl6mF6x42~Buf@$>rO%WLxeq_T>MH@K>5(O?ZZo^{y>$0@biuR^B6 zjLs}4omc4{_mU(;-n?(k;QzCVg<~WVhZW@iDK*%ctdvWT&~0wz*R5Z3ggp~7+?gp52`A*Q8n$91IP>=`Z!MT}O&VBU@7c@qE`p4ZAa zDEN;IONgOSluQ+cpe)RKyz8+&%imI3>4b&7T*i#>uNF0Piz;$@gJ>$pUx_YVN zRh_=d?Y9o;D3EwWy6J%#>)nNFqN&yL<}+HwmUrwA3YI^*@%=p@PwC#?gQF zcUG@5f7r`vIWlR^Un+vkpCyBoT>=5q>)||_Wqnu=gu04Oh?5%%Zpz$i#dvNQ{w>( z!B!BF;}vS`4#8y#fx8hzr&s?pDwHqDcuMblx$yy1{|5YgxjTre|5Z90JzqpXf zzA`#Dj;PhL_Z&x}RH|Oa`)a#yd%oqDjZ6zfk-;@l31S|t98pe1sNe5jh#2;QLP0%@ZJdHl}_Wq*_ zRA`LAwdqezS{1q7^qUvN1|pPvCZGEdLqGEYb8f3CgzV>o4}>p&GoPKktXL;APD)06 z?KAU%8un0s=XKq%OwU!vtQd|Pr46oKEx125jjb8f|2;S-)#hW`P!t;B=>^d7?SsAGTN%{9 z1I}+kJLTZ!cHJ5v2^bh)n2 z(_>0Wh6qTq!_#z*Vr$ggtixz3az6-Sv)FHVpi!iX)BFGoeV7zjAwwi%)`>k%Ui%13 zkYwRJ)<+b&(aL%+l$i}NgT&@+WlOUq{I7k9Ekg)JjBYv=oG~)Mos52tX2wlEVT9wg 
zYTG{Xt6$bNcVL`%^~=o%(;UP0;F5P>i}Z}DzKfbKxc#z=3c6`4x0&%fEYnA9Q}6(Q zh4krzwar^t0_<}+I`i}1m6Jg+`0G?AV@<7x{Y8BIiDvh-5ciOzOK2u6znL8fRE}$NzOvZS!t!qs*a+4x z?TH=!kX70xbQ&tTiWd(8$~am}ib9G?gP2)E9@ygp>7$E9du@^SWyN8LCe8AGsNM+xhx@;~o59;J!YAG2v@UWQOG#S_RnJ-?b zoZMKv+*n>;Ps1`j_%g-{>8sB^ZoXlXd-A`NmyuxkV4*i*E$xYwDs1m>D~gsA7`}&O zYBMOcS?cU;QJrzCNN6}<2h(RUSosJQ9)j{VI`67wtl`qq{qq%F$yS0q?P<;vy3@v( z)yaH8%70ffHWQ)u0IEo~jrf^fq=LF-B95h(4S|C&W(ZXm!JSekDRR?vPL z)4@&6qmE6y+HE!rI(KUm^uaa-ru8M2g|@H;BX*J_t%3sH(`x*R%!CJqjrablR>E$6 zV4@(PJiAezJ3EINe^tF;g4g-(*%m?XVUj~*5IE2rIt@F)}?+_nVv#c8v- zNt<87fT;02yTD`ZR^961QIvQ#CClkgidHney?Elku_W#=TYZb0Ao%yX6c_l}IM2g- zM$#3}=@;I9CAL0~bf#e~QQYc0NqY%V#&U3%av#1EXn*TTrbvSqGT|i`iai3_jBSKf z6eUSnijYu}khsNblh!>(3~g)-erf{wT{(RCy*U(BgTTpQA}pad`9GGX|2i?TatL-P zzta71ralhN&}dZ$o)l=emZy_i@V!oyoh|zPKTD$v(3#mdJ0+@bDZ=)pTT8=n^-t89 zamH*DKKzM#a@WiI?SmrZ@(em7dpfG8-v5MTw-Bk*#wrv(+a8T6{_RlK`R-*bC7a3{ z%ts)dhrbBI zw`tE-xT;U2CSK&NKEjJv`-PC;0vKWzbrXx%t`BybJop*Epz#;Rr0BrUW(;~Q;T6p( zf5Dx%;-=u5L~S=^J{i_tEOkdL%{Aon%j%6 zpKeV!ajX)XkJ(T=MBcTnaxD{dxV5aR;JDlg<-HZ6(SLeAR7<0V zw!IF7RAZ~EnPQ|q{qZ3C?0iA=n1@CxESHW(6ME9Co-4Wb@4f&}_)5Mxv8-lIt9tV9hdK>=s-1 z%*%U_vkHYi+6V=R@VQ8s2CX!A2X$-=YSR0^x`3t-8Ni`VyAybGxqGcqG4SP>iLcvA z%H-Au#Zj$#Fv4;rn_Zp0`zQRxWHdL53IX{_f@ZRsG|%W<*cr@u+U%0XcU{&G>m|#d zJLU=-=032he=zts^q&Q%!0*mqeBq5@osD~SFbMZL_#`+PxI)ko0o>dQw2Bv37wwoK zx5k4BW;}W%ozvtvAoV8{vSP=0>Ve%9(TJ*=nBckw`(3}(Ng9J72VeM~{(N+*`1h!s z44Mxhn{zn4JaE@FR!RveE7S9ss`?l?8@zg2<(Lk>e=Q9KEesc;VqEw`ZS7@N=ce<7 z56QP5q(;}ip~rZICw`5u!~0TR|6rg91rPV{)0tzR0BvC6#&2%VACUZpx21hj&%pDo zg#8-?1ZtC~uX-#a*>v&7nk_BO#0+RNZZ%~y3}9jUBlt0A6DDp~O^yV|`CUE*X;9f8 zh*;5b{dllHsP4BJrKL&#@9ZVS(2H^&A_-U1rRlVm{>=rLsi91z28Ew0kpUbR^rf|) z0+`A0j;>h)g&Y3Pw+`q93lIR07b**%kXj)m4sTDg4`gN?(8e1`zdd<>Z#tux9i&?nH6nEfj4e9^w^1dMeDfLZAXuGut51|zwu)m4h;^L$* za#zaq_4l>DZBPkBM8nw(?9Im$-_Y0598={S4sX~lK1KR>PMn#@hu9OwJM7&6+3eIJ zTUSt*G$5PG@|v#cqrEGosKjLF`c%f&a4t1rv+_^9&cR5V<77DaXl;pWxNXrRnpQ-w zMNt=AP66nsRTGxgFO12T*ie=?1OJ1i=I^1K>(JxwhS>At*17}B4-I;WiEX+sF)+lA 
zV<5pYkPaEB@tyTe1B>X4Kl{N6=3xma*Ru?!Fd+bqvfXw}JJW;;NFo0@_DLHKsHQk( z_{$BGvC(u}xslAc|C|#F{9v$?K|(Pz21br-DBkuw=F}X@&K@ew68dD+!Ph~-%1;|m zcLYiP2s}D+%Py|J3K^gS@J2kIdB$TTmt;Y>TvW)Kh3F8NBInsQvpE z^K>XW4-Rf>mX*0%@)YX}{sCk)7WH&hRN6}eVH%7G$kUIBpU6JE@E0_^6+c37R016f z-gOiNyCO8Zd8X?Il=9by!m+}ir5F+Y=0g>1|x()#7Yo-0z}|m%Y_YN=vY|TVkA868=U$ZEieF=?-A1~ z`APzm`zgqstuyi`qm;3&u_IGQ*{MWb6hF%@_Ge&aHJG!wo2**%cyhGH6s6ocQp(E zKCzcEu5tSy1vHzBv^pxvN}Qc-EL{D33T1;cflnG^oJ{$$NyaeeUVzTGzO?y2ZP~1W z);+-29nJT}Z4P0lI^Foa0P<0lrh~F5W~KTd+$KqB?h^9{RdM&*MFQ^G3!EnmQHuS3;(MR zmjThXC$Mt`a$$uXp&XTQvXIJE8nqMjki{5u#7w|%FO(KiMMZWyB&x|Kk-hd_0q36E13Y4}3oJvV9hDq~RD*Vu+x#&^wWyr@0*<`5Y0RW1E@9^xFP zvpvz$6?{PD;rN$4cQ`~)quYGOJM}7+yj<#w4QlKoDIJ9Ik`a?6O`1kKQJ~e`1xAKu zRz8;I`QeKFk9)r02bynxKfC!6BzCJzwYl>BosuSUEzPpi7V6hN%*?5L!80=4$aN-K zA3uesMl+cdoV@~?ptQ56#Ct^qQ@^w;Hv5f=9Y1#s(D)G9jHq~WEtkp z@c?{rupErC7Tf<{Q0M*Kh1%WYtpJ!sd6z{y^BL2+h#G-7<6ykaIwh*TL%XHK1}e|! zZypi-cY9Tg)nMw0Pzz8)T}*&}TNHJ|UAgkv-9Eu?@7I;DUo&G|AW0X-{q{Rap)Z&1 zNx<}*=qIV~|57)x(D)(q!$IU__^tuhFZ}wbHOEOmtwIAN+XtEm$4jTevaV%6d2qf!wuN4MeaeGuE8Q-Ocku&%tcCLsVJ4 z%yqW^>vmf+@KwO2VTj;g&%!*+`K%EWWx9fT?eB1H$iQBY1UwP}%sWzqNz&r#KRt*3 zbHf>6k*n}8BD~1aniAN!sqFEKSF;X&djv%}w8ri<-xUnB(BD-9V0gg6xEu%HWj6NX z;4cTnuuvhIp7H+x2uy(ig9*<) zAAlgomi<}AgKG3AGt8^T-3&hO+sFw)_0NG1TNfhqyIuoy-zpC-?PN*%IBzkMG_^3#buh zjBA*;UiOHdgj=>2zO^|a3~PJg)gFB~PuNLg8%EB$X3}~2yJD^X!P7#I|LMU5z>hBX zjIMd9FFe6uron<2hmui-0BCjeJI#`-GWf9)qf~_-uErC7?xxz73y<|cKX_GLa1+D_d4>Va`j~+>I%KiHb`%(%QXw)K%GMwU2S(XTK4es(u>z!Rq~g zgn%B9qz|sY1a|B3clCoVeJpz!vq@0UT!) 
zI926W_(x8e*ECNux4vfVp0mquI>u%;n5ePiE3M*&0h-);gn+o)>!pFEwz_PR1{pLeL8GPS7Hcl5rb>r@4oGc79#_o4}S zqn7_-XCT7iFuuT?RsaiwA{n|vJNbEf)Jehbd_zUl0Gep|SX0ry%ns&f_W|ty{}AqH zB>HqTH)G0gcl{M_yg&P8+zMW9b$K~`Kog2ei+TpJ^&-`JF1TbffV||`91b7axU+HW zw#tTNx`$4U@kIWgHD;6qM;orPBFOAe1q*bNBYV$}x(?6YlbUT1%9P#g{{Z4?ZNt+g zFEFo;*i_ouD#%>aQxSBKg8@WmvRUyq`m4lo(7PL{QshQ_&F#*KIE4U%i1f~m7(dwn zTUGtlBuF+?k~;7)S`#5A++U@APxJR@Z_8k-cYQ9vi9r!6!HDhGrFB4iw|ZS8Y}kor z7vT2eeuCIbAK3N#YVHz9`YyNC<%{Ll!sUw9$G7)^i8cEJSsTP;;~`)%0YMRZo?tKc zGW8{5N~$x&?_kuH!FPUCIP zpj`Ka;{CMbd}FiDoz~ykR~{nj%(t{wzIMDO@`Xo0=2p9wgPe~Pn|r;dDhu-Pq;$65 zamVIycT&Nd`RQj)2sP~aOJ(|u2aT|7#wjL0tO1&YWFVm9@Go^?9FL^Ii1?!x5{@Lz zjqYLmzPMr2{yUmqA7hv~e8$~-G_3C+PaDZ{KlUd|`dd614s+N0d&JC~f-tKPeQ!{~ z|JPke2%77Qm|ETIpHG3$K}nVM4P>1>0^zsVFPmRSY-T91Z&g4j(#itO=q`=4BTMP9 z8o;RCN5n|)Z68*6C9^ok*Pl^B?KZVE`q@SWKOX%HFU-Sk@Moem?*nxS7NBp8%}M5* zpFBKrk-|BY!akB@P=o9-n64QFoz(tb^{A?Qz^Y}W`n+22X>Ix^$m#Pg-CNo|T3w=s zRJ~K0KM>v3E>wun!A0v0g4GXlm-o+|EiZkvkB-yM`YJp6pzai5Owk7hr`-Qm>#z?P zZ;?}*W;_*R1!N7q2?s1Zs+rK;X5WP8Ie=%*@FX@Zd=u`l#s^lzn_0S5^9rE)$+59b zbJ2o#?fiejE{?C+@>QL|_AmdI^3y0_jMC!8!9Q6vHj{H;fTJOCe&QD zuN3(=08h~XWjCvGyEAGm9zgjt2L-i1N~Ff@fr=WAs$K+OoalWQ??Nno07@+8YQ~%K z&nbb|cjLpXk$H}QB8Fm;#2IJ_xLSMdbFx*6{3`CEnaN~qoW6Q}v?!hTpPys+^d?eq z2SyUix(RD+v}iXKvKwJr^(eFth@e(J7(j73NRf_@|A*CR)?nvabN5FOvx6zZRUU5Z zv_n(z4-z|X1ZI|CpWf7p2nFM@@fPc6(o0MCa?jNM8Q2pa{|Ph!Js4H~k>V)hv|@E2 zbe0oS>+*iHD~&sSjP}1Mx*#*~qj1<+3#I)D@Brt6_7sO%$cyoRE8$-tX{cL8fm3V zP*NJ{lx~o25QdKb9)0xjcm3b@^ZVuT6W73XdCs1_*S+qw)_smR-h141Qh6MqK66jY zX!YWj`H#=`#(tf1M>JeJvHEV9VDW@Qa*Xj)ICgWd7P{&Kr;0%13iF_2H=r?&%4@_W zI({*Vb;RpI{p~r~(SZX;51}!T@^Qb?7iN0a-76gy4tM$#uM-X=Zg)rd&qz_bHB#_7 z1m+I=a4h&Y>pEoo;-et~$Ns~sgONA0~0`o#;y!S=jME${kfXrZ27 zUFGrtzcx32tq$Fo}R*P0z1Nk9p;#|?g=XCMOkb&5`7pxbxthm z`}a_;WeA7yCl;EX^VQ@0=3;h($h+kuPXVt9B3Hr1ZhG?Lfh=mX7Jf+{befbFCZaPp z<$)L3gA(=}5W&>h8V0{ORm|$LaE9U^`z3RqkZgA$!MkUFWGc(Ctj-xkDAG`2*y&GW z)TN>{Q&(&+$w5zC$?w?nmn4h4=T?OlN^lV@2crX+Uh{83qhx1$K|Q#_C3A;7zr?<| 
zu!>?h)Z(g=82a6-89}Bb;lJ=7h~iZ(&z1xP61Lqg(96-|@l^y!)3Y(06vb5ZeFohm z(5j#54}(|8{pKa!Xrv$uH!pMVhbUPaFKW^%Nf{mJtl&#QGyWS}7+8?*VQ#1)nYttE zQDRYjZf;na*7b)kSejRV>R50Noa_tz(6i+_$ud6aO&1*2Te9Evyv)i}MDQpR>ofXh z)?NJAV`Gy3L_cN)kqD+F2yzxLI8!aP5DwA=`wcbcBYh0bjay2rdab!H7`QTU11`_l zf!JGYu7>&&W45xt%t~+KHACBviz`e>M&daZ8v&izY^ScyTwYGd<3BNOyQg^H@-X5wWnH_p@y z3_5kWZ+oG>B)eaGd1<(6Z6;iieYLSrseaE;b3|q$|2Id^41v!|hvPF_Jq>j0wI&V^ zI~w0(yx%!{ODo^YN_O;Te+TXPYqU6j`?;Bg8eWMV6$jT+*qm$?FG6unXO8NRe-LoX_a1A#YOY(E(GNPw`!H z6M>)_nr)a*l|26^Tm+I1wP35gwe50Fg8+kqCg zm=2e5YK(8TFQ1R@Q8sCppryaR?e$LZ;m^7xcnpLvy$CJyrD|GwZ!2rron~q8p7BGE z)c5!=58W{|tkJ)(bA;Art#)~OaJ9Mo7>Qv1-CY~%ASJ_s*dV_QI3YHM_?P9Bz_lkq zhi&KB6Nx_I6tKwPx>X&mQW_(Ho*>|718qxQAnx}!8E1t$<43n5=TeNb1+DX0v$|8U zCIoBx z>5ZrJdU@R%RxcNVBmx#rC9pnRPVS|jXvILu=bh;+{*jBr_395(_Yb3B7$zty;|=X< zYNflgp8ls!jL(}Kz1JnLJuF`4lvzt+Kzk)W+m%eq7w~(LkV|c+fJ8A;$`l1)I!1aQ z*Hg;eIYdBe&fNf|hq<{bbmxPZ0kLi=VZ(4N;P*HHT7o-nK9uNO(pV==Uk>>RV@W`$ z?GX0k9zDlx1WFM*gv7v$`igl9rStWD`5s2U9cPj7khF{Z1y>so6C1iKHRZT#=a?7({u6^iI}( z?K~fGLoyzpKl8*EWpNOX2S^96#r8){rEPpaph_`c2nDQ9prNJs8088uP}>o)s8u+j zJ*kI}{5@(|DxPR|%?_`+S1JiA|GpL$b->W?EiSg-0MTJG!6?UczwoO0EaAZ^on}&& zNI`l#d?zc(3dFVge%)N-;$?Q3?@0{4>e>Y6TfQrPO#hVV`U-D+5+`Tmv751z_!6{* zdKNmy5mparMA>zAC*GAW3e1i}tuE;pBpyb+|9s~Zx9HmMEBK%2k-~d;@!}l}U4F*V zGf`hfS|0_Ps@>2J=;7PPRQYQ<%`ru{C#Gd`4Vh*VmVZh_K2{WcrXo5CAU=q<-QYGm z<3`)noi79Eq9S@+v%0PHhOazh2b(^DpxNF#A*X-;Qvv|K?*c;_8C793m?j29T^XM- zjok3gvKXX~7MjfDKW|xC5B17eoi7%f_sF_WxekwZ9O4D;A&(9hN94UGk24Y%qDj=8 zb&Ty90=Hv}QF+byH&7r^5}T42-@l(>C2My3$M|oR-W%;L6DTh4i+{W)ORay2-8RRV z-EY*EY_r#g+B(}Fgi=bb8xJg`Jx&k4*=&$^uq@Np%}HNDf8S%~Bly~A=kw1)#{~tQ zW7K?P2D4enr@n53TWlRJhhi1VR^ErRNh(uGkyB8+m{srj`+}Vs@!lC6Q${89zmXb= zx(W$Ml#}fw1stZ89cD`QNL1~}00M1FR@y@)1(8=RjOu6f#1ZjCaSdhNROjO}4Y~P} zXSwA~xrLHetM;Xf3BRPag-Gux?8HN91Oj$GLCdI{NFs4g7hHw)cGpH-E#V}rq~NPV z8U^TLp5gd!2^VyNm-^BTK(qgE*Ft(NH4u_kO0QV2YF>8rO)+3DHbW1XTy}**-RF#n zhk(_2iVBKgaDf)N$LOG>+kM)Gye1nZmj96Iiqkbk;GKu-P*HEY9CtAns4 
zqY&99WKRZlS44s2MdZt#`1)cz7KIvZ2(je%;W$-WC%ieaFd#cy#%k_CX;}ELl`(nk zJuNIIiZV14vft~4J2Zn1!Yhkr-UnuTH4Wm+Qd+#Uud9~yifPQ`1Tr1u)^=&as_;Hv z)c&0%GwcAk{Eo+>76=bqb7=9m8w&}6*~)mnS%g-V9NXg?_064%e33Wu>}VMfa%RtQ zZ8#5Njo4xK0mF$O>#tK8Cyth>UvzPDt1Im4*obA>y$DZ)yb5&FdmAkc<6YdUsCAK8 z?`=}~nqw%q7{!zq;5E{}s}U>$$*kwUE)U*_M4)A6X5b+OmqR6xJOlmV-rQjIye3Or zT*ZOMlHZfF?`(?08A4G~f_>&Ch8rcgX5wb&0Y90e5OXRo4FuBVkpHcOmA8LSq5jzl zF9TYYh#|W+CU*wz$I_kvb#($=^D;`Z#@yUjB{P5TkarHC=TSbnVuu3G3=W<@Eztl z*+7b934#`=N!>2lCV@Z1_S`%M?wSjXJ0esmGOVSY91V{K7<|`O18bHcFwV?cyn`9`3w=fD{1%T#G`+l;Af`+}YFsBv#U! zvoCg>Z3T1>ClhVp6<*i560p!1c6iJz_0Pb9B$N~P{IrXRSBm$Z3ic3tn*QjGdneWD9*Kv ztT?*)&`pz!GLgmbPm>G|*B=iC1Z4$PeYkEr($1L?gLqm=7OjzO#8W|1n<*0>s|O4h zWE+_B3d;}kswj-y*uWr|D~GeL&SgwL6; zd+w6=j|?*<^|!Hb2|Z`GWBPq93m-s*dQrLY?lvZ?A<#Rjy04U)fYL}Yyq1$R$6-&P8+LFnde zU3R&etv?MN*I4A-bW$Yny6pW{%oc4w&J04nJuVsGmZ`asZ_o+Z&1##0~$O1D*)`77Pi_M_Xd`X6_n*d8poE38rI6Jio`r$D!H7-6+N9HvT zQSIigk$1BrZaC;B&bhkLf(gGxsH#;@m`7SkB}xe};2Xw1#S`cge4-6LN2<*eZRFU> zMlg+4CDIait`l{C$l}zl4l=|(IbN0mJ-hKdXUu;zYRj_ zCY9cFbpAB-@CsX^MvrPmUW}yfC*4*uKXS{ZMmPruh#lsskHHSyeT)gsO;- zi+Mi2Qd*0P-lpc2*g+s@yWr=2tdoTvs(6};bxN{Qe6j|o4#VZeFe}gV&LGR6Uui*biLg54YHcHyH^4EPC+qv% zv5)>SE@z4r*q+l~5R(DVBOrJ$ns&wCF=A8^=(J*1r~%Cqa9Wq{#7F$xbqbcK16r2Y zcOyUqRNNnuIBQ!$w6f)|RY8}YN%1nI5dQs@C82VYyR|}uCXLb3$uGe?FUm=5vS9xDl@RkFqg46~VYV#nAfy0nd%Vyb#A=hcc% z1JiQlOO*(F7iM1sYD5$^Q6tyGVP*f8_X45 z496-wr#{SF_xVTy!}pQ7@_3QfaHw0RKdTl)RiC7WA0JG{NJHp1*J$=cf8K1S8vI7w zc<4!Iyp1RM5%$4m01b`IiQFY><#XMA$6NSmi7mleXdgR^e1-%mKSpQO3oUkO28moa zK0C9Vk5clt1z3)x14ypJrH3S19Bd8GkOz(t7=)mafQ*gfANoxAyjE@NqB5MYGwbHl z&3(9iN+qL8Vqy?6mbrB6a z8FKxxn&`iVb}F0@=_yEuwC8|p9FT-VJ22j;BwLmQHim2L%}tlg!$k?*rqTACw>+E^ zdJ3nf%2Ak^;i`->MmSftk6;@umBq9)iUhY+)t;cC{@P?9Z7WSklz=aLnC1pjKO~|T zC^AolUR~~GEoBBxYY@!%ogQJeO)Ck80WGHk7-BL4G7eCk&Q~_U#W-F5xA6WN6S2Ps z0P>NJVh@rLc_Q$gzyP<^5Oh!Art17u{z02nhr24GFK-=Y@g&7Vc zt~P7b8(vE4a?UjYn=o|a1~PFJ3m-L&(5=aUZ<^O2IPpXgLnAczdztuwUcszuAzs% zkuc6$DvX`Jyjr9y+?MUN_?OxSnrsq~qL0N!3U_ZAQxkdUKvV^cOg2<2V7(?3UMno_ 
z=#Co?+XcKhJLTn3<+@RKHVb(#=ar} z^fnzW-`K#3WiS|*(oJzpy3=e-oP!qUJs&GU@Ln#Xh*AkZJNuOduQ2yO*G)v4GF(lq zk)voqWA&`+m7wmwCn60=a9OV-8IQ^=6~;cls^&6Si75gRb_1nw?SPz03Xn;9Y@S4FdQE*b|LQ3fRznt$ZL~R4(L>|$eWyf- z=|<7V+=&~Q4;zuO8X|W(4O702$~}^;sj%1Ep6F;DQBHjVY~LyM%xtu@zM6U)%jxCD zx5pu3F|LF1_|LIbLQYg%cf&loKptX3oRy8MD`tdKP!<<&nJ6EV5&k$he5ZpS$OsKy zOE?(4qPVJR_+&BlAolnU;6ZXJ%0br{T#_;W2X((T;=s~l<>a++a6S9zmuc2WUbK=r z%KH+5Sc)+YqwXk1^1NVlvvE1upU+(U0mIq*8|{XidW2*B;rrX3)=RdUYH+gXdLd(W znOx$S3)jZYZ$i(l|D`i3vnI$SYKf-zHgc}R@DY6E-at^!adu)O-77MkvYd=eUWTS~ zw^?f!l!hkf#Ga8%>afC6ijW3z4}y&kc}BlRZ&7lH51bO8<3O!K!32$3$ryT(Anro2 z_i3Nl4t*6Z2(9M6@EX+Cxe3EgL+iz$BenIC$UhkiF4Cqnl~{j6kU19;>^bz=usfD! z4>Ra5Z~^^-3^>0yMho|T>H{UnnblSQjVIDO-}kWfC!e4}IusKP-~aFs%%`lRSxX`BIK}&3f?E3H9as<3slSjQ3<3Qwq%^H6-b|G#|3jYe^nxfP1aEwOPq3M zPjh5v)|g@E7GrDN6S`vSCLr?_9{^iMmz+)3yU1(b@3Go7qTQDS`A)0{4S2L9NbPt{ zHG2-Vxu+Io7mw3NT@R9%i#6A99tk;Q9IZreD6}rlwNUiCac&or&$URtQDP4l2;f4(L^$Vh3kLU`706uxCLjBMoPUu&WcWioVbIXiUc7P8I8{j12H2% z=hki&W@!~^^h~G3F&k}lzlBx_OPc$FdzA66ky{`>UrRaU8v4~}WH8=(gee-$f&Ct+ zjP);ZR_Y(Sb`9#@?oRXfR@XP85e=I&UBaE={)ofpTiAL`k`OM$frZD8S;NZ$N&`T4ewTB*N8r-wDQOLm(*}ko zcq>*pAOzSJ;|G%61Qrh1V8Z^JT=k%lyt2_1u;nKg|KEPHO0oaj6`El2&U( z9G8f3kpBWsWw;T@z%*KeZhzoIK9J!>V}{WPAa45Y5ZDDc`NzkcnA7B z1dAjgc2UTyzDAaUgj_KEsYa$X=*aM)C;JOKp@F zlU8%0U%+D?Qi^Y7yP&04P~{N0qaB=mEzYx4z>2of$mq>WRHKX6i+9wZ-ekW>4ExC|h&Ihe(-ojuSRdVrJ4C&!u>^oSov9S@ZYqmDidiQ%YDuLC>$E zLd@seUq?X}ZzVTH7x+K8k(mcTc)-zAkOR5=r0lQ{Px&*@{UyPefl6q%u3?$dy0nw? 
z;|2K?fF_tyre&6i)=6m$L!P%dB-eN^QOw6WSZAq};}V`*mjK!*Rxt!fQ(CKuX0Y7;HGAF<>eVk|hsM>IaQJSz~)lGk37O`*ge) z_V<^tBMiQTA6JliEtsO1m%Ne`pG^e4ODFM&5?HFKEwX7?CyWEkJKL4elFIf$ufcB9 zR~I9Wb{Z-4m?Ts9lUeiuhkar!chdt;=HAs(d>rU`RZ{H0o6vDnYmqnI`zjIcf-js3 zzG`KpY<_qGvNqUdwNHFEtK`1`GiQpc(}Xk#a)2E8ji>d(9~yXy7Is2>C!n~)40aHH zn26unoUrGu=9BaFNkVHJ`{)n5)ZLvrh8T{v0Jh-g$W8RCI=%p7BgXo-c(30BTWOXR z!xts3$Hqa=|6C02ia%ZB46}NQCL7pAvDR33DGbIIGv0n^jeM~}Ur~OJn_EXjwc_Wo zG!~px%b$CyrGo7SGAfP){RhhDeSDh^*R7y})I?I@0vSe2!>YHgr0}eQb$X%qrd|e; z5x!Z~lEWdF%1bmfiXF9$nrvI>;nf+E`SDGuA&JZ}zFdrRut5(Q$_F$1<(!#!cO%mYUuIu7jzN^f0L*yOh8&CM_pXE7)+E-(RG> z$?F-jFJ-dOV|hmukpKDmp_t-UV5!{=T=Ao4dxu7rA!9ZLErIElwfT*=4X(6Hy}f2d zPJYJ};*iN*N*VF&!;!-uc!K+^-a@ibeP}px@~i$f2^VVXE_-qi52<4PZqE#CZ;v4{ z|L&0unSX!0(UK<2Y3MUD;QBDACs~A{q=;yGAR>PB70&H7VW(@tvs6KP4{g^5UYh(+ z!FmtvzOZYFe|3>EQJU4R{(7+qTiWAUd||&C?Q^QQp8h4Bk-9_l=j}{d~eVCSHkyVqolfY(ahWJ$u7nWsNSgv7=F{PIq@Aqm4(_3j#NyG zccU1`7{GtW2a!Egz1MZ*g9d&n&MqFP(A&pNhAH@r)zu0Q@k^%$_fi9cz%WQEPe!JG z64KWSiA8OHk5l3=Qf9a?b9tQ$WDks4NFS`=b3$c8uQPUZymlQ4g5A7nG}6oC#Q`~0 zRJn_(>fLsegCF(&Ku)Y=;)sEW_fw& z`DOj!(XnM_D{+(;z>tD$`*>|z6Yo(pB=a()7gHTfzQ|k!e|G0yJWklr7X0LR+Q)B09!RXt2ZsLY8bL0a*V*Ug0I&)bH$_Xk)lt*Gh^_?| zh#b(bgnYW4<`*n zyt}B*pt{|kjfL-OsxcuB4kgJb7$-~7K}l?=FsI`~5vp^VR#LlTI;p zs68d>s>@G${WiAwMn19*hP{53F3TlcpIv^xm59%iTeskU$QjHFsnpDTUhYEH0EHSj zi}l+04G=56jm;;B?X}E5pupmCLQ)F-e3S_8PS3I>v$P{n;#@NOfCAR{nbB0uQaOh; zQtOfLp!o&G!5i{^r(u~Z7A{;JC3GoCHVUN5GQA@{FeueGA310w-FB$h^lL4F&i+;x z*{kb!l+O7cQ|QYQ)o=uP7Ij^&*Qu$R<>@PzDhipEadalzv=tJ%fM^ciZH%!IC}v5D zmvEInn0*41S9em5bxY5MM2jIMS$L}{?hyOK@Fx?cq<$V9)GPzT{kq1zl3o0q?6wfJcg;jwq>AyMU zd>wevjR$6>rXud9cwB;pJUu}Ez^yxh3g#(=2%DVJgM@<5WiwWGtxel>;gh`2?M|Vd zjxj}nrBC8b**o!NRZ1DPO5ZhW&^0{JuV~h=ci4eZ^KZt$4ppHR=x827UHSg_A;aL! 
zTj9O~uA7LN-GA#PaxKD<3U5Z{chaBB1ynCk@x$3!(0Rao!vhGt!Q_*Yk;rzG95gZy z#Dtwj5)A=+QsC(dl2ekGY{L4!VZ5alsDb^%-!;d~q_IV@Wce4_+`$*jQ=eT#+g@B9KN{X+_KO{( zYNIQ9`(hN)=XAJ>-@n_X3f&m$9nybbo+Ych8C_VKZII{pC6h#DFlUl#g1_6c=&!8A+UO+&Pwt?SVRs)^%Qor&2I%(8%Qo)p0{BndU3)ZJqT?6yv?hN za3rOw(KE2E8(t=LRSYoojZ__Z`cuRDhf$CnU2aZIL?n%9W9>L^i@8NWTZ5Vpjv@yO z-i{WqK|be5(FFK2g^Ni>-OoNJnkCHHgkJexcBO_j*!%ALyL5$cDc`$f#4rrsPcQwT z0+8j|*L zH$n=7d)G%OrHV><9NP;#VAs!~92iV2?sQ}7oG&}~#NQgFtVtAbqSnxO3|@H1?KxK& z_fC{#;H1~t_@HK_sMgPRe~hzvCoLu9B3zH#dDc)&to&%%i>4IMwt1o1lH~2veXP7J zre#@#!{m;S@}2GBO>VDCX~#C5i-Wqa8y(Scdcu^YAEH+Z3`WA#n;iRTds+==F1Cih zck5O$S|f~IcOOIfI-hpsZI?ZwrR&7$5_T%Ah3v>>xlV0AlB@rrOs%vI8`mrQMv(i} zr7sEo=#(`}mGe>)0S38axu5As6ggaqYbxb)#~K6csM_ToCLLLaq7l ztR78S1T|4jAHw&`H-jrZAdic;nYqDZ{-`9JvrpX@R=2J`7Q)fBkM0aO*a$oBy|lm= z-$|o7-|rn}d~Sc>P;>r8lts_0dve9PcaH01!g;WvqHb|`YtZssWKb+wFFA9orR4`Y z0|quRSZfqiGz{!7ny7yBZshw2SnGGUPq%ze=6J6ll*eblJky&MwOgLryTWrew*-qbB31}g;U&@ z@th8R^E2YqrFhH6_6KTR9mb>U0y0ISp1SlkNd^o}5i^wM))U6X#^y3F2HhM7*^IR+ zv){kV%%cp;*S8wbv*Fd8VblujIH@c0RIJHgfQlDZJHD{~bSok^cDs}?c>7FM=}^z@ zjqSlw^p@qYl7aqdC4P9u@k;F6r!#8PD7J0+TN)v@E$_cvfA;jo_p?f2cnqF;$IFJf z>st}iEsBV#AXAwBS!t{B+4uf9aj&apl$?*mJhbY99M_GH_Uaya1Rb84ba`w{YYCk> zgkH|C3iB%wihVu_`yz&Q(9w5PDfhT|9i97RL3+w~caeMTyB5|o&t#zo$Bb=A*4@1` z&1*QwRsOl$KF;ihcJ3o4wLe$>8*W!7ca1|%xDMh+oIItm>zq`Ldc~LPC65GoN4;Q6 z;trb4fdu_^rj`o5$>AUBPS>~6UzeJ%xzTlIOmHVTfQ>xzbtU;Uz1)Lw$!m4)q=3_7 zuV}6MvSmhyt~6x1+W9iN)jnvCWN(k#edl5S%nLXC?b_?EiRH};4lf{c?I&7xm3Jz| zTXeBL3Jc0Q=X95C_D_^C%O0MqLHjyfG&!WSZ^R}?4Y-WB52o2ngbP3FnYlC+Q`8dg zD46NIJUiqTL)17IKDqts*x<;0-+8O{WRI=)%q_GtMb*o1=wUdbb*k==lH&1EJA|3v zYU4}KIV|RKk%74*tS?J>7_}r>9ICvwJlzuZachqAj_rCG)kaO8na1*xFkgx#;UF)Ao(e+-eA1?Pj-5#9LNh&T|Vr7tWlK`0R@NQN(dPkyggGvjB11 zMC?wLx$U7+pS``kH5q$dA=Qw)>9n{}!ohZd;lJD7vkL=1T*55i#0mM;%ueA%N#?)v z4P9J}tEtq2lz@k|Aynt=g^M0BJ)Az*lr20qIC}*B9+P|)a>9D>L&5`0QYk-@zzQG! 
zx-&o|cj@O=6z;|rR7*ZDt^Bnj)NSTs_fZaE36)0$D$=2U#KdaQMJUgVf{SXgCCv&@xRb(Jqnp=Rc>)!Q|iat)sQHse~#dV~xvtgf^It%l3i zQ{8hxDKEz=1=ien);GT|UR|-{IGJ!7#(UK9wC%@MhrMq}`C?2QJEt>cca+b^TH}vy zb&Er!q1#P$nYE_BPdq9bI}R0{u7$h?@{^-WO4f+&7|DEn!*5e%qHN{7vAxSP!DAiv zP}WWnj81P2j+^l@a$h=`+c@jU-fHTvaMVyoRJVTV4C`L>jh?E`s4EtcB|1}ZUjMPH zX>7l7taU4kh|}Pe_Avq>wefU+$#!=3=(-n~yt3%H7=^e1rQgJX%w?I_RGTJ;_E;@( zhP`%ACOh}2qZ?F5Gn{@au%&-3Vtm!mPt+?}l9u z^>W)7Jr%{4uh`PIl%<245+Z=74@=8mm_QHQY%spuSwKTW*kFKEs|3#C>V*T3FURpNU zwJ)UcZ7vOKe5&i7K5YK>K*7__3=z2e1FhqS{#T9CQ^QAMVOrCQtBo&@se{~lC zwt`-S-M_%`r*Mv6;Swwxiv9F|_|*eED!PXw%yfU9n1BAtrwqW3HR2lk|KV4IXjrX- z2yD4Om;C34|8DEgs$&!X_igNT-x^gGe{h4I`8?Z54DnsF6vi^y2%;; z!1~`|c)85KXD4cvLwVct37rYZFCoDbUE>m$n0oNLZnq?gsqpTWzm1boY;Dl`QtC;5 z`gJAiR0MA?jOYW7SYWlSep6U&V|x`7CfOc!FqJGd zH%E_VwBC?uXQ}=)vf~K&_RK6tt*m%bte7rYkB_P+YTfXavnUmwel5{H(z<+@B=gX| z{poPo8h6iWk5p*|QRG(7xNmBOyYuXfdWXmImx$<%P3~i^Qs*HvJUs zos&isi;9X$*u}z1P(xbwf0~2eL}+Z>+?)hCI6OT)**$sK9bK$BxC8_QI5@dExVhQD z6>P5F4sNDiY!0rp|J>w%?;~yLYVKm|j(Yk-#`7d^s@c$ zlN?9M|7)aItf8{O8`Fsqpouf@-#2mQVGhZS8@1z%xYe^E?p#d;S0CoBuxX zUoCb2+meru^WQE1_09jgrIxFuiXS>hiv|NRu$ zSrki{<3DpIij`9lTn2%NL*%6;9(h5xUt`bT(rWrc!n_zLO@xDiktaz2RTnV8QI=>T zGLeN+48*CrpkIi_tIex)?h~O%dR^nB;!T`5a~k_(2r-01iBciTs9PLzvT0|hDQ$m# zy|6By%JMu}tohb#wb3A&efoN~#(HYKWh%>i!#nG^j7&=#r9>wV`|oQxv6D%(@~h>4 zKLOvGpmW6ggZ_OPK_moYK>W;$3mFIb&kI8#7K)}mJAz#=F!YHMER5k1;nXj#_XnktuhE$A-&M)pOIZPC8-V@L$0%flSmgx_%>VAHQBx+~~L0B)0)Ln>%$Wy1~j zG5smtmh9i{zx*7+8>?Ucl<=)~IqDUk{Xz1>+awZwts5D3(jER+Jq4*o3d&Zu)zTy4 zfoma~Wh*24>wLuT!A;s1tIQ*G>z}4yc^-WquTC}g|Fxc{^;Wx{uga*+C(mJ_xjwvV zKCH6sB*W|1dTQ_f*N?p?J74Ty6zSw1j|lxF;$QT7^L?)FZ82!nly@eA%nt`XZqn@* zi^4yhql0Lk4`$WHooOc!D$l6_CPYuZmLX=uA4k`F61Wu-0!r#uK8B)8peAw zc`CHBF$AxLJlTs`#Nx!sfHzVyudEum_A7DbFoJo$r!!Q$P2VW0JbxPLpg8X@d3>?Y z({FId^S?T;_}1f0@H<1ubC(ET?9xfSV^}rd{FB+`*}jKgNnOkv9bH)>U;(--LI|J4SYyDmczFF(-p{NtYq0wJO7O4UIh zMt(d;{E@hgE6qBN3%o*H@eVrR7@}fA1&(@SP2k-XtbJ^9()*m`OSfgguYF%s zWCCcXK4H1|kLS-q480c8F$sRL%OYSsLhiiQ7jJ--7>HT(`@@Sn7;rhn6gMszZ{O6a 
z9YP$Dni3&=M3r+XK|8JB}L`>4K8(e z?;=jo)F5IKZnGv`8|py0bJ*BrIOBe1|F)@aXG695$5@PAC=s(r;8&)2R1UsnzE`@y z-%S#j*jaD?1&cRv4`0}0`*xT-LorSEioW;vF>2^EB)!?~2Th?;I*pi%BCHC;dr~16 zV!3Mr{<;S-MRmVaB8a9UAIDci?bpjh=DKf9-9jXz8C3Gse&AA$MhAjbg~;Jn{AX2d z!skQf5i$g^Y0L}|G0h>^w7IrMaaks9B_aai$M}Aw&)}app+FG0L>Zhs2p^^kl~-wx zmkzc!-3{ktLi;h_Lppq=5y!PO$k)TW?*Cd6#LCqi6{SQ(e=lmUUf1;D^rP-_2$?B8 zD)Kp)IOljPfxlZz3WjK3XmmEePfKbV?6ET^Gi)Jko(uLB2A2reY}D@(!Y-?N40v%a zYiAe!KT~arx-IPW4n@!P>P}Fj?FL?=lc~{2l;9YqtITUj*-Ii^;HAc$EGz!r{}~L? zU0%6;FvK&ercb4O&-s@dx4$%P8@MjiEeaAhd0;5d}NW%@pqVM5T$Z%+}oUi-T9$}pX!03%| zN^A8Unq*8woWN+Kfvt=CxQ(xE-Nyt?Yc_o%6a2&d_vJ5vd+|?e9>$WFz_KYtd>+Nz z;b))4syb3a=kyDGS{VEg4_t+KD?GEN+4jkc)Q%cn>&wyi5%}1w3w_csQ*=%^_tU(g zm)GkGhX&FiV}tWm;ZvcqW=*r{$?klUtAR`nISGt_;`l5{Dgw`4;CX}`)dT1g#tN$h2v9JoHR+nObNkASrdjL{*IJR4Memjb&?O#f#VNj-ZRB?-kGb@(qmA~BC31x zStI-FOi%~CFXoc}h5bHwQdE=cH)5h^pS5(NUOBeU60@p`UAYOB;aN$w6igdEQx-m% zF<U zzGah)8EI!MdVjmC@8x?q%-`$Tabd{izWE3m#A`4AllJJUOs{5Q3O+S$0vrwQF1hxL z>p=$-FfynOv=UbC#R}~rYU#}0hvR;>AV=34kEwwGUQiun))PtinKxxq5nl*I>*`d_ zc@SQmKArpY(d=s~jnOdR1>K5_>9mbsABy?w=!@V~eLs+7>}3Y43|(~kMJlg@6<6MW zlX=_dcX1ND743KX{*$C~_mFJEKfkY7h*`(^dIY3{1=^yZrnY>Jiw0u`X@ysXFU5ZA zl0R?~S4!u;SJzE97t3{HX(|fMFvqxj()s!MWh|D_H#+g43Hi|M>!)^x1rnhO;yi#4 z`=W|pw7b+{jB()6#Cssc$wezMwmp^`(uOZVR8!GLj|k<|Ghg_ofH8QBMZ4InP&K>a zByrw*HHzWU{urbG3bwIYHp{|n+p~t9)F{$JTh*)cV@=q4cNjL4T49?MZmF2^BBnd$ zTM$&@$e~J)y^cTYV9=YlULFjZ9dCa2u%z%_i(g|-HtE-0!3?&{7WEr=BIWMtiAr?N zOe-xd-Hzhd$Z6lM9`v<~j6YTNKS1w)rTnAY8NrR+U;BFirdcn6c)}=v1!l+PA!JTA zjgu>cgzRD3!-9T31WwvB{kkUw)noU=H*-c`fBpFKi+ggalx_lb&{`E|gNAvOuqpjr zzNcw+IEs@})lwI2X{+zciu8@X>oX~>Uf5N(P3`cGe$4Vc={-o%t`B?&Q8#8 zea_~Jap;^I>P_J7SHxu*xx;M{>GkL8^ts{cuLq&{mtD27{cPu({CD^r9{ozI#=?e+ zi(n8ku82iBAO8IF`=?Jz$zsRv^Q~m@CXgj!C6(+dyE3aLHVF82WdK(Xr-RfScZJ-< zsAV4$);6n1ku?9g*p}RRyXf8RcRwmpAKt%1TJ|CKuAS;KDe&8?PrJ7r0?#&eI+#;9 zj2(EOH_}SSQ~f7KUJLbp)-krf=pRT$i2X`)!}VuC%*Dc|!$GR|K7X@{o#`I%yb40G zpFtf*;BlLTN!;v1sScWx*B)`>Q-z_B^SBHNDHn^t_U}pMcGGp-l^Zn$U3e@bzI$Ca 
z>stbcDBt#_#pS3cA=wfBVe)HGPlQ4CEmM?z_ke&G(@cwJgK=1?7Z zu>t?P4L6zV4JECF<+BU>@49AV(;4(E&Ok6|Hca9|>pCi%HpraUlXd30i9%(H#-I|? zr`c})l=;3L(==X9qTcl1CnEI@vy%*{ZKQIfyDe(zF++V4+`sa@2KiuN&62Jist*^6 zU_;QiJ)PygP{rbOfaFL>k;{aR1QD~1;>#;3?PpxNgiz5WijvAxD4N|V{6qJFBE@wm z;ZOcHy3s=}m7p=z3ziJ9~ ze6oTM%kd6^R3M3!GUJQBW7~ZKe$qrVhJ-j5wdwqKCb`Rg!xj%Dya1oO79RyoB$58Q)wSL) z3$iR*x6^knba8iKX|LOC#%XS;&CYUT#jDVCiGHMx+!9UddU9vvb92)j0nGhu=}UQ= zu|Zn_>PLU31FllrxbY=*MUE#m>tzq$r31%+a@HIliVI9*3m&x zanz>qAT98j~GAo(siG^}VIIV*;T8%dSLo8qZNV7QUWB_VzZ~Us0FuX!R4;gbXqQ{XD8dUpWELDHC~<@ zV1MgULUlH8``muxlF{ozuq+lOJn&1^$ZI8RbtQt5J@xhY60NSt|Km2D^pGXFvrKvW ze9g3;0dm)kAwm#+YwJ>U@gMWI5SpJO(ooEZHIuw4ba3^BGNK9o$^CS9;mXSN()}F; zXS{eCNat`8=?`gw$WfgkwkzFXW}A~GPd*dfyPmHEi$XWQwcaqHLaDUE9hij&_IJUHVq=iGY)MSiv( zGB+iCOAxOBOMS@wv}T`9O#piKF!m1r$o(~ZdRnCnUMBUfyaVEWtPZ$at};IL8h;1X z^p9@J;Pli(KUVFji2}tPd9&vv9PAzn^s!QJSQF(du&Y((3txgn`k!vnNkW3xI)iTP z%+9&sn&=Ux)!|c$vhq31%b99O_II7+J!jV_R}u^*$V2oUP3b%S!kP_nU;Wv<#m?n( z^u4;K#Zu39;rH*;4jrv&W1LBe-8k_u;7ha-F-3{ui-_-N`^;cE$=@ak!yuPH9cK^f zXQxdV`TbD*HWS$rI~F@AZ?v}+hHOtb>bgleGk&w8aTV)2wlyXwYI;}vhUIkJ&adVw zt0lay*~f9q;K;Ox;#diW&2Cl7fz{!AFGB?gg6URer%JUH{d?{4kl(pT5;Fx18XOGw zk1GQVk2}$-!nr4dJ5(taOYMHW#=j3ppoym?MOlw`6D9*HJ0RaVFiRY7--){owtrNv zbr+opJ=AuNonDSYz@>-JPcx3U=Fe({)?f;oR(y-+Pwfl&ZKYRy%Hr>&&%v-Au_MrfeBhwT^HgeZ^@vlPfzSyF*7{~`zkgM)!n-7^Fhx5@>lfOUkx0~J+s?@6~ z{61S#bob_QFgE+Uty7RyM;2xKKQ>9ea|Xqy5Z29fQSdrX21r9xGK)_1bz$gtpg3V* z@oS;k9SsnHYB96fPYlkF*d_awNUOcp=+*7YzU*y28` zuv%zf32Y!7_fF57m!eN9mqSVC5%)rojAIIt-&=kZzUui=lx>u+^&9^ctJC~l5$4A~)g7`adGYa??S~P=XZYOX}F@-f( zTSIcC-VAoUHBI7D&uR3?(%KJ_AL$89+C1**2#;e%E#8Q|6Nx zc}WOg?!8|f%auB@$l+h|{WOgwYTA`4;95BF1OPsnPm64v7;s>jNoY)&4G9<~Dv&Qv zHCq%zqF;Sjz;z>1Xtyy0AARG3bw?{$C~Pv&aiRG>|DaP=!0%tMu5qren*nJ@!OP#r z)clXeAFhH>QTyi|*(gY=QB|1VYxcQD-f=?B`wOo~thKKMm_C;MHnEqZ;)I@6nwq`f z!65)Pm8Ka=RH7rm2)agyt_u=9;9=&c0w1c!+IpMqIDJA(hV+N_dxFv&*WvwjIs1=g z#UX?D7F`C}FwhRmEOoxW4(4s1<9rXF*M@TNAlTU%e7nkFTvX`^_aJr{#H?{hdY^V( 
z1+w{YnLyDb(|8L}x;QGRzz@v9djk;yr8f@wv>ShbGBmi(eB7HlP%DX;RW~Q&`Pie( z{c0T6O7cUCA5d<}8NBj&3amWe;78)$ z2u$de(&Aic`tMVo-V8-b1=qni(hNlJH~sE&=?Ck5zU8TYAyS+LIUFMT><;b+W_|== zk2Fl4&?!T=snwWUB zieH{=UREC1m+=R;BMKo0!M~Z-xPIQ!7t7OlLvnq+3VmC|$4w%~Ph}#i5IZx`$)roS z!Qrm8`@@h|O@e@?af7TpL~(`%QtV1iA*prWtzx|Qm!p|441&~di})O=amXT!Z{#La zkgXh3+}@H{@49Ul=_-@CcblhZ0&a7_J{+->0Mh6^lR(*rA7Z62)^6P1zhqSUuy~J^ zlL{K6XETt)B^6A`$-6UF%!R=Ki6xjR2&ii{R!hSfNfGd35XUX>)D}*J(=Y_yyg|`C zw%8Xs%A!+|Unl`v;j4oa2;Ew#$X^Z#f4b0|%p!pCUA-v%j-In^FGSqC#1S^Bb}su7 zTb{}QmR1%>TGh-MRe&bMi_8dj(&K5cK$wu5`$~n-qpfIN^J4SWXQ!Byg11)QNBmGw zBbCj%ivl5vI>>^LZaEdAG_P5x#DqAcR@SC{7<=@Jt&$c>i|lkn4gQfoYdRxdgu5DmVr4^PN-(g((g zgPUt5qH|0;*Wg2|Ry0Aa1c7P-c2lfD$p#kPp?5F&Okfdwu|Lo-T|#VnhxR}h=`@8v zdPmJ?;l~54&@;tNGR$9bIpNebh9lu1XU{YVR@}?0GkuRa-t2CNFJN&VQc2e5171oqp?%U{KAdsQ%@y zX9v0&Y4G7~&lB}M{t?aYE7;@5Hg@jibi1T!OiKXB5neIT*Axe#0821ROGylBV?5X3 zXuM|<_d5aSQP%I@me`l*z#K%m$YoS+K?ITWjkAK zd5z4UIGXRT_MS8!bKms+vmAm?jLWGX6?0hKZvvMC4Q_wv^}En?D)i4><-AG ztlm)0Vo4#Xw8_o3`{7GP)>+=NoZ6Xm-S!kn6OB0#L?;!AYvY}+oIXuSwY)5l%PnxD zM?7v=@_$z9$2#5)GTPaIb1X?4mTpFgxV5Y9*~81HG|-GzW>bft*u)5xZwOtQ1Kb8%Me`=}8(&=NZHL)vnKI*! zc8i6dEkD~I*%>UHhkiVe0JA81o?adk1qtm3nA@XQzVFWxxZEMnxxxdFkgcbS&kQl- z_0(-vFKt$+a>^gqDI^V0b~8)vUT0*r_Fwq4DWz-w04EC*ox~<#XKiHYt2f8yPcwZV zJ{yqv^iAT5{~YjMO8PIW+CQM2(DdJi(rFth8mbIGM9YXnf+^s|C7MOVY0eAL!L}q; z3pq&1FH-m-j~*Q|97gO-bp!;A=Stnw$JQ-UxjSC0{-!hKMpN}D;GlYA?+W!Ks*37q zM9TV&oe&bT`t-c5GjNP$mCf?+-Oi9Y7d2-H`z8d>9O9@K4$LweoXJTkj_!f-5{ zc~Sf^`WA};L(_xN&tmT%RoEef+&AeqKff)?rqQl6GTfWB&r|C{&fR1hQV2?s%*Tvq zNv8s64|<_bmQKDC07P9~TC54%geiAPpfOhkim5&mvVvPgDIOE)mA^@kzcjfPd7aE! 
z_k!f^v9?{Y@f|l~thljo$p^>gQ}~Z>8mYpZ0sBqLV4spK0ONFb-=0xVKiMOYP_r`p z6yGFUBhf@p71sCb@Y_w)Sl*aQ%GT|L%6(8y!RVjVJ~WOhG@uyyDR!7X8u_-e{c)|$ zcsJfitR>B>`*y@{I|^%5E|p;N*X20eJtZ9pbKs(9QnJdkn~kJ_o2X3bykl~CJp10+ zhG9^<+Zh%*tYhr=*}3-YA!`()rj}On{CW!9MG@H0p(P6!UlNSA`wvzkc5yn6Wb$-j zP@wDfMTsl%<@2ic#-m(e?*qE~3I3R{%6=j!Cg12I&uT7b0w+;5%NTjAMCUPUQhD_E z`KFpSx8fxq@%K@})k!9N04-<*m3;asVdsgCbWR#)Kk2JmrfdC73-uH|JzoA5hzDWvp+b6~$g-+sZQ4}g z<8y<7a6|}zoTH>l*L!5DB&5Cx6ve&(uEm^A(xMIW0*gN;f=5xF8d}6D;C)~r#H*&Q zGu34AG^i z!mmfbliqhtGpUM0292l7QH=rzx6yPu>!EXf$HHbnD>h{fhS0_F!+JyI`@58jdmM|k z%O4*kDLJs{Su!L-W{T&p&3{2H4iSX%nA5*6&zG)` z>JtHGZ=!_3sgP=ENpy|(f;Qo;ScwM;hQ&j9O?P(60Ji?LVy4}%W$qguaxET!U&1GX zUkqM!0ybPamECaCxha!|VVRL`zMm^dAxu6BOL85Ab{@OksgPHLe6ETx?|WclRf>#y zu)rdziz*i-Of_;>@Q{d(?tJN@i0Sx`)zzOvUQr6VF|rJs3CVSmfF8BK+s`W_2;}qz zwDj{g4`0qH=W(VdXITn$QxQB$!+C>e+y51m4vLCeda9_%P|z#6cif+N3A|1hJh4ON z%XnHX`car{s#4#oV&lMut?Hr5ZVgTLph_Cdwj)- zTvm&G!Akj2zI3c6_ln=~#Dj2@CMX6~tZrElQ~zR^2C4%ipnrDsotn>n<k7bmA5o zeN+M)!@ zp2N~Gz=;8UP_O}E=M7f?7nKg{K5GQz7GT5Vs4BsA=Ud(j%9x3lpr`}Cs%_MLbDi0( zvzv|vg-U4(hf(W{(KFq<%V1uvyWk{9fxi0JrBe=N!JpphaD3kEPh@x=bmL~}=sa~$ zD;OVX**ATV3bEcWQihRUQ>V0d`>lu=AUsL!SR;YZ8$F>&bhL4&<6m1TVgCHSG5qSS z(lw*(^zRB3hV z%nuOwwGIn{nCznxOEpmiV7akgC#?|VBNed!)`z@g6c9L|3J<%PP(`mQv{Rcjne~|t zu<;;8O=Wo_9XgoURFDd0`sn1Yol!7^BIBj-L8M?v75%lv(|`f?ggO9P!TbqO0dM^P zVzUS~=aBx(Z#ivgzfR3hYpg%NxX>i1+y#p%95<2k-KC(*3fx>U?&Q!f&Cg*7;rI1@$j+KQ3e+6^V1ClX2 z?C9oap&pNO3}ZS{*!1UutpP(M4fFu8^>3$USc(4q`+_K{AO$dtvQXmD`za7Bx6_?3 z4RbLL<}5?ey$X;O13*5Q*-^Y9pjaTQvHe7UpGnCdpZYMvu7VJ(_vgL1A>H|OlnZ<% zBw^nl_hiajmif|zXi3%RvdbSnptmrgtWu>`@{#vk^e+0o)X`z{rM?_!L{d@}g|pxM zp?4yj7b~thjj7B-+f5dmX1|m{r?Bd2jMoXL!>g}@x3&-W$DK~Hh4boYKtivREePDj z=d`4CY7=D|#KoYvBq07$XGapn8Nt00yubRK1Iy+i&@&hkxaa7;zJ?6ccvFe!7Z49U zm9Ub$m_WXz>W0B(jp9s$U?_wLG4z+Nm?D(1FNahzADG%b+aoQ>>?g1ivAMb469(eQ zT)V{?c|9>EOd2q`w@R<^f_U18V0ZM30_c{I7^{0`vF(}Rofxj!B2~(9fR6R}lB#oE zhI{PJGg*yf>OFdn%(d8_sq*lvtIuwU1s16O5{`%oXvOwZSVNP3O#n%1=kNJm~yg9j;2*4I- 
zxq$$1M3T-aCtq_}Z8_MpJzHZ~6PoS=yh+y9wbwCUyUC;`8=Hoa9|ZAbOh1tCoY;7z zhhN65*cU=sUMZ${s2L{KG5zN0;LEZBZcQCO0CU`O4>e*xC^$5QcT+>z7_H-&&W}weS)#3kD4@!)X+nOnw0GV z@*rt>^mt4P|JLV7IpV1hJp<1L_xdQcj51UIL&5RhVT)avLna0$x7~R??Ou1F*)ag{ z7~}B*Kud(Dl?{7dKOzj2#h(+l*Ki(>`Lpv1Db3P5o$ZdP%z@wO5B( zkp^0W=nP6@dIP7Dla!kbY8s6A{V4I}^d2gr^-MO`-t$ys zDHM{&m^M_!TS`pGlbKWOd=5a*eB>I8s9k;#fUn}S?>lwDkcHC20^n{Stian3)hO$Q zAUWh>DfoqL;M9pTV5_nJu6Af|6N(jddz#ZS5yCq@_Tfh`k?2=13QaDy9kA5xctGTv zF9y+^{8`FVJm|UFW7W5;_j)YIxh>>2{f3se1pLmAo=``V@!C+02p=l#14dzvXT0s) z&8p<(;TUnzOV+uVI5p3C=ZE+IbP4Z`eTnRx1=@=V{8^q^LZz$PPmO1K(6lsB9P$Th z3vbIlK2s{FzK!C1-pOR86;ZdZ2-)9p(v%8x^y>~j-e6@N4sY|3YKmg_AB z;x4-|j+xH+Ya?05sLC8^wu-M=kH}GNao^JBTp5^<{$hT6UxtYjdEx2M6o_z&M-D2~ zJ1#B8x-z|VR=ifz{F5o~L=FUhfMp1YEI$aQRg~Z!wj0jdwaM0ece(WE%2+5zfLAZ= zYB0$k=Ec;V5OH1|mUi3$%3SG?_*Zhogd_mlx3ROgykgaXT!_8{m5XgjHU{dP=r`BCvpW*f&1OA@BqF{5xmsq0E`s`bti!CPWQ?Y%c48w1h3MZ7#F&|EmZQ{fRo zr*8NjEjpA2@i6o0y(q6Q07agL7#4@F!=R(-fHIe(!n=kga>&g{3CNCba@Gp$aSXlT zyO|hcvipfLk2C0ynrt=jIVTIhDN0hE!YDegGn#~LucrHE$u@&ZW?w3+Oo$Me9`)-& z=H5cwVR%mdg`#9!@GpMF_Jm}b5PEEU^#yor1Y_8Mg8YYPq6U0NB(yP>N3_L3{s0qWkvk_tEVQo2mGS?1TL2&LXIHGBA7TBe_7Q=4&A89BbBQWOI%Z*HVpj3 zLoFt(L@XH_A(dQIw@}!?*d8}6m`D;Z7Q>u04}_IN5ogp*%sPQEk#*ll4NbkikD;oO zN;#^XU82S@c|X!YdVH%fWFBCwG2sNbPd0#5(S&eWK_DFeMo~6ttrJ0JS~S|JqB@vU z0dYQ(Bnp)qyq%O+9s;Db~X>p}ki< zPgJA$w}0VGOv7B{}V z^V0mb&PtdZJ$%{PSdMJ}DdD9-b}eRrLTfGeU#*ia$RxvMxO;r3Dh&1CvGZSMKY$jw zbehz=tSj8QBqt|5Yjug0s0=K*)1Dx=_sXEwMtzLcyA2<9PIou0W52r>;;P5)??(6sC69JKg(G`QiIi^KWSSTT{m_NP~n8Q_pn zIpGuUj3Sn@l{nE8yo|PIOJ>}$6lbjfy#LMfHTf1{AhD7PW|!ASPjU2Km_IkY#E(Kp z8@7MeGY-H^!X>!!g)`?uK)`Y#ZH}9v@m08vq)t-GdU7U;7GEdpHScJ{v}26b?a0}m z?v)v~)W2rBpR`hK>F@)f@C6cA1$HAFL+Mhc+&%3S zl`)NoUOaUM>B(Gs4vDdEmAHQ-`Cf^tpF*bd2hu#d=?a-c&UNgvpGFl$MU+J%{#RR{HORuZ}+L-16nTGso6;GH171^^pP%q+fX=rg(GO=~TKj zUT4-~7kbVM?$c2$6^)YqmfKS$RYfN=0XjcY?mc0=a$QR%r82=Vwd4*ATsPLX#AUc} z1Y`~7B;zZsccD1FvsfRKhGogdm=$$(GRq^%p!Z8lt9hT7s-zI;9xp!o`Kxq#>Gw$k zvtod5@otrlAPKOwxL!`r%3K*bZF&j&u30_t}v0RC_+`uI`=pqgni);P! 
z`=Z0+ zUas5~@<0f@pMGyC``Rp+c$Se$l;zD*hwbqmGCtKZxYIV>MrPnn?#J~zV*b9Xg}a(6 z0y3-yjcTpun{QXk8x~P4+GPY@PJ*9EI?iOAKPmIiZis&KJUF~oE>-#da@ij={#{KK z$L%|ALLcgM`fD`6$l%#d-9bWM!>1xuo$jrx)1}e$kJM-}uf3d%%g~%l=L?nQ`__jW z!x$0!s5O4S%l(h5fVPGWJ}cBplm4=z81?<8%l#4{5|7Lhd3N5L@MR0c&a5x>H7j&# z;#9@aiPJY}_r@PmGPy>5=F&>3)O&|R+eqeKd-)z~(|g*`Q|pn&b7HpbEjBjm&!-)K zuB5+2o##OT~x!A%uFHBj#juM^X+M!ZP6Ys&bF@79cXZuZ!45FA7L*%*| zv^%|fF&y}nlF?%eLhnYUK|$ZCw}`Roeo5&zOj(ODMwXUhvN`qjL7uoaV|UEM%+O^a zl>JcUi*|NsXmY*@ruBiU__rToBu4VEA3)v4z2Xl<@ad&pKx0#UqWt9-Aa$vMp5aaD zaSvK`#!AdRrw`AU?&9<(N5*|S`YfMKrg6aF(pZ0Z)@a<}U)q_@V_oXE3#WwyaFh0t zZ|u?;lUpd7WC1i7?q2D2(9dA*J|4Fd`Q&W}>+WKkP^lUBtDm`25s$tcJ4SoegvQO|I(-OEbPw2Qfg3d;apmH=R-wvh5{~;fz$l5KS8nh2j zwCyyYf^0HHibrvQU8-uWkkf;mt#p1RNAb0u7pin-MgxRa!7$6&Bs4LqjDWc!wQ~p&1f#!tafg2{D92D$X)mWDecTW~uNb8ir7O#`wRKrx)b@B~ zYoVoKPSxch2_u(($6gr6S;fdt|5Kr7L$59mbZoy0x_!@KAJ;1hncr9H%ulr%P2Py2 zBlw4VIZDJ;a_0W=cCF96(0=6xvP*{^{`NOmzwBbfPs`7I%|4fE)pOc*vn~Y>TrJ&P z=!J-8sna16(r{x6X0JtljM1FTs(yTROjmd>@{~-^R3&X}W~&Q^xR3SEh-eK+QNC|V z$}7thPk$~|vU3_TlFZ5tf_Uc z*ow>UcOoi=LKeBw4fvvBi{QLd18;g{{!5S!!Y`Cg+kVY|6HPTyZY9CxDXj252mZ(A z;^6@KHIuI&cmBE3|DKy>#^qu@+mvaF{B&8rxqrQ`B)ZTZ?l$_Iib)_$U|(i~)NPA* z1;~&>H}Hb?Le>atMbr%|TNu9kzf(_6?e%rvQYQZLkuP3c03kE57Nla-zO&`8IT$G>@+N85sZI-9!3x~yNoEONqzggebOx@u&yFe znWt{GbqCNd=Iyhd}70R9xe(DKDvocvR->60gAdbNcgo<^uc?Bjlf%uf}Xo&b9h}W zFC^mk$2{qW#==!CKs0~q>H%~wlAq-8t`A5!Nem{}iTNd@C)%R2-Cg+y@K?p6)&;me z3?h?0zNBpUi2H$nzKlBI2JG8c3=}&GG3vVHcRHJ)keG65(Re3-mgtHNeWeBt1RMhN zx&SJYExQBJ?&!+S;(RYOjC7Wumh;%28P784aiX%_RaUpZ+NJuPIOcIJfUtaT;&=FJ zsg@z1`um?S5+hv*!JL!QcGu6liON#Ct`5h8ToROH_i?5~cDF|hv#Qz1(TSNg+g$VAVYS*$_ z+OVW|pg#E%$iIos>E2!o;M@b_!h{umsY>dGK7(5|nn%etz+W`tFkt){p}bta=}>gf zN9mP)!Fv$4_&bPy1zr z$*By?X64Edp=TlI8ZZ36RC(>22@wbfcUkXnM9R-{jG9dg4EIkPxGGm^4pN9YuZDVU zmJ|(ycL&#F1+M{?s~1G_#Jpc8-^Zo=8UR>Xm%XMkXY?e|q9CeK$Y+d@9Suk zYcw4a!tf|8`ney_`1YoDTv}e_{sJ2GxvX-*r!$;EGfCVlm~Ss`^?xmNXcLy(&k5}K z4y0mmdh5`81{niAj%{uMkJ8){l~e*osGPZU%5+y{*W_UpX3b2hyCeQoh)4})a_8Yxv 
z+UbTOUy7WhJpe}rwo8p%`tPkO8lNYW@>w+wI7x1rgnJO+7?QA0m+48>G%Eht{n9YH zZsQxliDxOHJp0ECAkCqo3(W+m8zEr#%1S5Ul0GMux8yD%KVA8Gzt3 zJX*)-*VLf|^Wug} zjxe-r-_G~>V$ts3;PhI&8#{_A(SiopEV0P%g>3A+8m>_78-u3Ljdf*fv7nAXo1M-G+F%Pv&I?{UhNuY=U@sqvsN zGNn13|Hl}QmxQ=6)k<&f1Z0xZcQ44kxqUU7r3Z*MzmA3PIQk#np}tY^XD>pG*NXJv z6~;2qI%1W#>5a(Yuc_i%raa5A|LZe!Gzc?`>|iD;d~98wIQW`nJI{c`I;D)ls(#aX z#)RyliFIP)Z-N)yLpO~FTahUmf#Dvi(~yrMcqaaT&Zj#nqW!=@fl2?SyGo)`F?}7E z>l`q8M}eTidH419h+AfTanvWaUeFNPFG)l_$M2K)dJOUXR!OYAJ}PIglgf70~^OsN!N#n00R z<}X^!rndK(FD(W1a+A$0A4jjVRJLkZB2zs4SW`op2agOl-7bU_<%*w7R~RN26uhmj z&9EFj^$EV0sjb)Tk-R>rcl?I(s&C7(Dzj)QV(*p7fvosqOO+)^#EMJv!Y7*V&?PT> z7TLMEp`U8FOEw|CUzI!h4f44!0neEAmK&S{33jat=Q4Yck(ML!KEX>ajr`WfYTGg0 zRgOV#wy*qC&_^)hz>T^Ts_u27`>L?&u2^M6i(a9t`}76phR?L{2Wnu zc)nFhvZra#?(5x8x|~a-Yf67kmrJ!T$im9^xo9hqaa~0CGp^aXJQW;d5mJG->99k| z;*lG5`Dl71RUp683-N$8=@(+&2|x00j(aw6FKNF7wmHiGW_AcQvn|!a(-j^CaXRI`?&& zpnIv|&5jkNO#bPM-4<1o;1FAR(jA9m$&FO#b;#F7>4>HI1bhASIDB@UK)VX_ zLFn8ddrKLWP|tvyiWSQP>Tvw#LW)2MdhsN=s(F(PI zE+c?tDe_bc{{W{>69ZEaZ)2!bD0#(vsNB4bGWCU}kL3~SN_|l^B8ACeRLd^pCXzZ6 zl8I}d+MbAn6hB%m-MWo*`$L@Zc>0I&c`O=HEe9D)u#?zA#Rkb#m?yA+S;d%-Q zqJw-Xkh}7U3?}72%jmgHG^EJ;AFDQ&S`BAtO$}``E9u%y7RQSaLNBt1I+xA+ZobyP&uRq1U1(c4gudI_8bTgQG z?_4OYpvD-yO{&xD3@jTlZfCtZrqQ;vrG3k4AJCtnk}cXcz|EB^%&^J9rQYZBU^YN6 zo0FJc(e6H7?{IX>9jmFK=Wq?h9Ly&UWwoyoqb1C>TRiMPKUT6Zy^R${J9+oPC-Z7d z-fp?Hn4o@j@T~7s<*z2%O?M8kfeFR>4!WNwt{EscCK z`-Ni{i37oBK?WbR*lL)UC=FnZ>lm%l@>;(BxG5_eNdL2M%xCm=H|=E;y-KEyF|YXt zoHu5Z-i}eftl9PJm5_Fc z-pr~5r`6Ey&qUT~w8t2pFm#|S>M;l18RpjDB*w`67>@t>r)*}^3Af0#h2i+9d8Sq7OvuP#e?yk~?$#tw3H=@Lhy| zFXe~O(y!S0;q9q|sM=|q2;}6J@t1mg^a_{5`~xlaT_LwB^#=pH-#H#;j_DASsP++F z!-A8uD#i*n9=AWxX5vj;^~Swfm!lkfdhYZjd&D5~cPV206F?f>_~@nTJbn{@e`uih zjpiUM2Gw;TXSwa=Q4i#gwrYPQ6-|cEwC_u%q5Whq!62DwAmPpO_++Fuq3L<|Ala=L z*u=$>?_%$Ntm%%QiEU4V`C}i^R+O=Tb8BVn@AAxx|1z+k0^xlMPQle13}A2QEi_EL zD=jP(1hiYt)tQ><+3^qmq!L?c1s1Fq`J!1Mm~|!9)7|-O)Z$T|!5X#{>~KCJi~YtP zqfBnX9`kDkIz4ZFc>WWB$qP)c!tM}FQKldZZwbTGE(rrW5;H|!k^`(rJb){J(a*Tx z7D 
zW*&;IK(QL2=_ZSBR7>*-mx#aW8HW{soR-(`V}x-sd`YatP^$6Hc?~mfmgfWNk37s- zX;Q!k;_kT3UmoAK_CoL^#hs1>S5hcx96+P}GEQ3$u`iT{&Zd-?+}07VoPJzxjJ(!2 z*)#%2w|`)tm9s!u?=gcy_w^ck13*@Zp(6);Ubn>=p8)A<&>u>j8pb8lWh*u|y*)^8 zjJJv?<0$(8PiD2tLCN{;hwAnRY;Mqp7tcAi8W-YFdZex0~T)uiY z7l}iFYkvoRZzPFr(H3Y(tTobDM_5K2=M4m?wmD@ULbP98pQXJ8nZEJCSnC1v+&G(b z^Ko&Wk{379;+j-_FGTYAK0=yXqVxZ>_m*v0Hc|UGARs9v-Q5k+4bn&|AxL*fcXxwy zcS<*ibT^0~-Hp;6K675zb>IJQ@VvTTvTY~B$sEVbtXXT{f2&V>_I_N^C*`IxgALIJ zg*#z43Z=z?th8NWD%1W;Od-Y}oEwCd-!H`DNUVjIXX8yY6_7B&l6r7p3QnRjEy{;2+g2c!z~L7(qY zZV=4_48}!!yaTKQ>urA3gk=%iqr}rsoiPwL3mjZ z0Vj_~O&*)Yv-#%>w2-%oXrqQcjALK(ybcn3s4Qg+INU`1zj_(;^X@F}b*FKJ9s?vm zTAgr`c+sENG5XU}GWyt|?NsJ1Z-HGOy+DnoEIbd0ZP+~-2kV{#fXDR}t9{7fF78}F8XCigfjR}H8lV54VWT|)Aef*^@OH<=^HH}W4 zKfLgPbD->EXJ|&|gO-o~fvslOVqoOO4r1ml3^uxT_^AD4{LPQ{*iOvUTWmyimE5s? zvaUesPK*&kiY7O+Hca5fA{bn40Z2^}X>IkrZ>w1uZiYYAU! z)%dw6JdSuqz%4u^P9#cUuaDzJHkpji&!onbVga*1L zMe{i4OA498EU7%zlK5zE^We-yxv1ENHV5??M!ZQ|Thev^wO48lux!_to+=GR?5>zH zJRqHU@JD2_G4vE3HFPOZYn3gpHRv?_r3Rl(?p{D6Ry^%_2$^4tzW0gnKo5ROacs60!nVjNtMz-|6;MCTrV zRdkj6`sv{Y7r;>vh!_T#QBWn2l8&G+ItQapASJ%B>y*YOl4rxy_3pBm?_v1;V1?BG z&FoZ^*RcylFtnCk*qXLmYOpByqZ?M-y>KzjX@+e2F=A@DkVVLklh;XWT@%I94w8+k{M7ACL^f#l|E7wu(@0STS-T9bvD%;-t< zMf6nd^!^<_K+C{!KAgpfCg7G+SChnu188ixl z&3*=bLm>(^qrh-ay1P~i)WukVT ziRJ1c6|26Nlq17Oi^(+#ip__o7|xpf+XWihOsD72->w*A=%ef}(Z} z5&6z#Nm^1Ce1587?DfUQ=t4$%zbhUP_jiEMPTfi99Sx*xD4I5?hgLgofk8KIjFd#~ z;)PWAMO*9^!6;vzeY{9VO`l<24LG8&Vy}gRq;406)x#qZf?g|#B3ogmSG;K6K+kOz zAc`5I{sflM?$=I$FH47&w;HKnM39UeZ(jU<^33^jpN-i+G3=`$4{B5M4r!gn?E=pXvmyP~Q> z7Xs*oY<7jGL4*?(W5eQ*jJD7qtWU4LYnU1` zBf~crucbZcNk_(!i8#K3|L5ZyW(SxONj;coOuV)Sd$|_@00`?AR3l|jLmUZ!Py0XiDrde!DmJQX5*V6WwuIy zAJ+@hZdjQM*YKF#58^SPT+`!bmV$#hHUGzy?Lz#`qa=(TkQN;TV@?jkL0d97PLEqt zA+}&@wmFsY#}u(bD!>eo)7qrnttrrNcF|bYNJ6owAmMJBhHQ{u*w~`I-cy8p3RLSG z(`SIps3n>IB|M@!#Po#Ze|C9{m0Qg5;5~-(#ZL*U`}fXFCW2CIOFrcPN&^3g7Cvis z78cBF2FICcYZ^pNSQ3wY@_qxb@-1^9B8XMQsqNoc*pp7=WaEBX+|VPhY~rSICNQ(=rme0EZ!pmzMHHkL$=chP=}0eK 
zWpXJ%HT!)~B&`6VOr_!+_nTRDeotZFoE=T+(R$(4i`j2BiwziH+iA>*vH_r}i&?Lx zBv^6C6qwVcsU#yRtEKI%nk=2mK_%E1vV{-xzNWe0$Ncq&4Lop^K#GeYIPhFr_f6k$ zNW)$Z^MVL=TxE1Y9TOo3;+EkY*NB-xNu|Xi8((WB*etJgsMr$C9{2_mS4*rkFsvC` z#}^LyC(c;(;bz|YrH?dR;T_4t{6vHlM2SHJ#X-A8N3cu|?F-WHmxIwCo=Bm~8ixx8 zLefXPo=~J1SHY4yf{*^mj^HEJKNjhWkZZ#T6C96tn;~R>o24e~AS586O35}8D4+q$&a!7r z1Q(W0eK$X2VF(4i1(cFC81Wcn@_>#OmIgS-^n>muMAM-yU>=Q$rXI6a)&hx+%*h8m z`5*3pG87X8hmDMXK^8!ph|fWC-6Sp?C~f{1`I3vEtX8J{ITSXEk_~c6D*;R`wN!Pk z770i>Mgl+6pvo~$o6yo4DHNJJDOj-XR5r%BA$Uv3Td3TMR^_+Azh&%6PzDEKULW9Ln5>ZAEQb&aqt`yonC)Uvw#6qEal$?EozM3#fjJ*gb|2 z@YpiNkO*0t)d4NtbgQG;U;J}95VKA~mRwW>?HADo@QtF9Ssf>Z{VjeJ(;J)vq+Bjr z+7pWzGU`8Sm#p}Ka+yk0?l-_U$epxNi^53@c7EDH&s5{y;Zb~={nL%!eAxt=4dV$2 ze`*BibLyBVvPB^Yiee7ERVHRa*h{iqAoQgT9=3L3)QN{|Tg9dI3KiwEi^@h=J}1g* z_`*NP_(p1D1bJ5j<52#BkY+vt0Ol6(DHVW&Hx$p6!)t6T9##%AI2DPHEX*(l7@Y5# ziBAajbd%e72wlS<2(cUpi0{XUf#UgpTovTd;Ivl1?-_`6kQB`c3uF^}@$%J}$`RPa z*h0Ys?p{hUSW%4oaY4Ub^5*iD9-Wu}1Gce`F|MqA`68iM_W5RGm@-|Az}oly%ceWT zI%~;*6*QS2O+5vay&619eJXTdWrhP#8Ni1v(5qY8l-7oqyelh<3a!_n2V)7uxbldf zk?7PJhvRXX$LZ3|AH}2y$x}k}fGht>>1nDN%sKh!%L+4N|96GT_;`8MVCx&~Cpzgs z4{@2!`TJ-#oh$YtiS)yNe+eR+V!vccg?+@xy;~yX(MuwRx$nz>egXekD)ZQhS?7x( zWPV#N6iVCvnO|Q!Fu$yhd~+%cgP!fjS2LN#wgZ{72cL6bzkpB#%wigymay@ipJK59 zz52kQZj5u{&>0D->vyV>GKk?iA29Fk>6d8%^dbz*8KS>gEfqREbb4oR?vUV&c$dXt zF@WY zGTuWC$;*%dyAB;CeCE~hjzluruWiqCagzpba3afXxWy(na=My9Ii|N|1>L4WqMiiT zbMG#KI8i!S^XFG^l$`8QPWri7{)ymNsmF9tO^L@NOX(mEx+Y;WH>D zr4b$RcAZt#NOo{~T$;0cUR!@7f@g~*;sMdh8i?p1_ERxY$TV`Z{$Cjr2 zsYVmBX}}cW$_a+Ts>*<6jU*7Q(S#`4TNR4Xwh=cv>BHMySfWIRLwWflQQi1WEsF_7Jx46TbIIc|H39f;U6pO$eCf z@GT+UNkcx~CmO)=VAKXevtuU`@aWirz+n*L1p8GCBF8M|kZT(gV1lvr0nYB;{829% zGHu9FIm&aCjmf+{xLQHyxv=| zDfaRQ{UrAX#%DE%@tHVCG?qjtkqxnP7?@GuhGXV|Nuy5zXj8|!w1^O?6u~^q2z8xj z!2&j3KF!9uz^$TTCPZ9V7z*~l%S&u4ImnbM;}gi4zK(;U0c8MQm7M_Rwxjv#YGGjC zk|868q=5=@H03c7vY>#*GoGUt39%hi0^qtwK@pItMaeZ7-JKs1PuwrT0p%d9;k|^C z`Z`r`!*wotX6aVt30PzcL?^4MN&Lv1ZLFP-4m&)~;WEqRR{>xahTbzYT~(wLMT% 
z2s11YYJAOJ*%Y6Q?2{hb89X9qTP?#Y+Kpelx3M7U4JO0#TMRa>|7S$-wx!TII?{ra zaB2Yl%B0{03hhQq!KMnP3^w$DiHwG8zs`c|n)e3?|3WPflq~7p^+0j?c8X4RD2la6 zIXu}##EJI~D%(50pn9t5fSv^O-&$?CXG_%kqzs?Ig6@{oo*9<>` zs-dL!E}#I4dLT}PLhiKk6y>C6$WL1mN}Eh&59~44k2o_W<4#I- zXT%C(M$pw#oa!=kkckUQDFFzNm%|hZ^M=d&Jybm}RQ4x~Kh&%6L1L`ItdpVxG15A( zE{{CxT|Rekv+WQ)+bO%(bg}dyS5Qi2%exO3gkZxgVYu8&+8#E`+AsW-Wy`p%G{i^8 z94}0tua9=c=3@@G=G{;H1_-U3woHN{ftC(-XIjiw$M`fb#;0Y(NrJaW(CD?v#)lsN^F!iA8(_xiPTSI4=SO(>kBp+^1a6 zbBu7mt^*W_3UMymKcO23_6_AAJP?ubM>_yy`EhQ3gv6lJ{ z-6@)GFCwT>wr5XVsEU)ZiklQI z4gl+HQQ^yi%3`(Dpruie;H0;fj?8LJ@cWMib5JTtO)v;dyaL40hRlpKaWC^QZGz^e zfmY5bDUwudVD!*--e@wcpKN@z$vT)aEW0l5Ex%U!hf$FZi*$ESRzV)Y#G z%MKtxl6?<}yt5}p&t-vQ5NT2Ck8@KrEE34hGi`UjGeL;LW6vMM7gs;@eg8%xTc66m z0y)Wzo&k5Ch-L*qLGczu>@|?;H@gq^rwd%-^kmUE&K8Z3|PWA{t(mYipswrFGlA0{N9vWHSK?wMw6Oh&nb~4*fCeOb> zYyvmuk^-!&?sIc;ILlu|^}sBQSxvq)=OM}OA6s?5p+)xNzDjvdp)O~j`o^O_0^4D} z#imx42SmBtRaN%t<|B#pkn^OR4(Qb3fB{8wFgBxc@;RJ=nEpEPR{MpL&+x zOQOM|qgHQ2OfZieHnnRQ#AiX?{UdmXWXOE;ueU$!M=P*6c1Wf_8X2$1ki7+`uu)J} zQU3DSYCjjBmL<>9X0#}Kp6bmv$6e#Pg$DpRVlMsjq2oL$VAA5AELg(&JJndv$#g;J zuLg?*e&U{OVEUUi`vZIkAd3X3LVx1O>9LT*bd88vFE9d zP0bl0!XxLsW1hgX-}IRm?0$6%6#9Io8%I6cz!pNUkW4)Y7`DEU$*G={u&QNIeqYCA zWR9)>b^yyy`L37rdCF0U2Z(#`mU-XxS=$K>LGD)sW@mZH)NsQP6Ftq5VOp}2>~bi_`i!Mlo}=-E@Lrppy@S%wWK*oobesS} z8a%{)*BuY+q`w#!_BDcQ+Qw*k^N<0wpcaZ`LGL>wh6*BV{sOk5g`2O|O`RuhF zFKuW6H89FO|B-ij19;iT?_hhh+4sx1Y_t1nlxg^%dJ z#^g%!eX7rV5j`#^I2OxEz>fH#GSvpj^z%>jEUenyF>#WW4?xY3=VGjlbI69T)i5so|b)!jQx_X5OZx z`GUFrku=RCxhh(S7NZl6&Sak;?r?9?nl)*(KtVR%Bc_w&q)Cv9yki^$t`1>f-LBu;J}3v9zzZ@ixm2S56iHfqM`0r`7Legk$l`A-g?f2|64V@U&}uT8 zT)Pr`_U#U$63tlrP^qnvdp3tCH%{GR zU^=z`X@xisC%Nor_l;hhV1RV&6i~8j=uA1JztbwF_aVS|m=)WT*0-n0bfqJxm{i&HvN-w*(df=b_NevRXVm8xyk$ zwGr|5h7~+&tm75!p5|cd)DVR)Yx-l$pa7RPNWn;PGs>Mh%3GNiMi5xu4)1q@l0d8F z^*RoR+v)l*g$09x9y`<3cK7QXHBVc_NvT-j>fP_G*zvcA z@}K@KMiES4ZLdd-h(;HcV|X_JE`oM<;u_ZN1=;J$76PN}&@ZwIa-P`q=th4gtl?sI zeV^uYIpaIJ=~M005zMHglEl4sa}fpwDWy+1Hi#rhw&_UAg`1cZks7bibY{EVU-b8) 
zITwp4vO6sB=^cs%*h48s&u?F|y(OWEa&CYYr-ThO>>R8Kf`6=fM2MTP>SDGQo45M3 z`&;lbZAo5wHK@$+=EbAwzN_7JbpTA!!V^}hiyL}NTGLrC%279VX~1HCOZe*_x%sFq z3mhgHqO~s(gEAe`IRwp@gSd!#Tf|q3sb@!j-yo8+>_;YM=x@Hp`R?=T&_$oX73(PQ z+2#7>J=^4skYMNX*fV-f+86|NJLZGJ=l;OxyQ_98G_^ZBE# zZQWFL`nCSvA~GktuvT*Bh<&JE`cANwQpN=xrL&j%>X1aWP0~jPVo^w%;<)|DLz>By z7UoXL?snR*&W(^<-TCa%h>L=de#;_19D6;YIFyX`iC%I>A@z~Y|GlE`*`RPR#mQ5^ z8obYigSC;o&+46&I}IG5Sq&sMtNrRhWw1ii%yse{a_A9N2psb>T)tn6+@0_%f2ayj z(+OXD!^?{m^!f=p(p%8U-YysYpfG9KY%kKc_F17S4L6Nysblxw z`469a>bnkJN#$`^9u{4ch-#(BPOtGRe!R8ckWLQWeqHEJIc!@k5G;%E&fRgqmBUY; zIau!TPth9=*3Q2T(Jm$M_&rn)44HaP!pP_6`S`ECe|F3h#b=rP<>1)ydsijbVO8dQ zh^z@ZMOjQq45^1IW`|u2%AYLX%A&F{+i{eV*{F*zgdQLc94E~Dy%vxFaV_BCNsh~@O2YSJ^`@F)T5W?>{aG8PQYLaOo6}1{R z#_<-&9U`mHE zAV4WhYh~n^joW*e_w7J|PzrMZU!*MAA>q&A(6{k={W2qHpcq^OHjK05KW6@cYL73h zHp>x~mi3gY!A-S%I1IG5{!;a4P73%Dul|`zY89Uj!7yZr&MYg?I zf~#QWi=+U;S?1>D{ByrtNdwglWr5U+TK#UAEBC2fF)nXLf`ebvit3+@hK`ImtG~mI zGsEJffl?CX=T3dO7^r3LLLJPpYLJ1Ksp8;cZUqTCI9uggT{fwAAOp9L|B}T3yaI-R z2;w5#A_hTty}G3?J*AjQX1WDQJn=Au5II`XR&smdtBo41@p9|Us(QetbXh1xb2M5#^ZOG$*&`p7 zgXp-F4$I!^7eprCaot#LupM-MhD1Q{J&6%-|n#bmK_H2X8{^NM5F*C&U(%M9+dhct3Uo|cfaU^ zP(UEM*R%bC*a!f5|0R5qsHRkFlueN{>HWvC3}G+$0r3x`19ld;Z}hKisCk6auc5f~{eGA=C|!GG%wcQ0e9qKwm+&>bc)C`kJTZtLL6(wjnmc~Q2RpF&|; z*L9_+!volU`hX7)LgRhj5_y)0A{KbhW&LX^=JyiFz00xiY<|?0m;E%d-xegCI9-dH zcmYVeKd*|-X+aAdOQ5EUo_ap*jp2}GE@Ils3nlsTZJ9t^A7Jv!X#i^q>whc-&?G;mkJwRlZ4t!GZ}{AmPiQR)%?d!A&#?fX(jHZ!1E41aVCmO0Q7hd#CI zaT-9(J%b4MkyxyEyTKxkT+V8V0ZYe^T4v#oZFM=kPOnF6B)87jG>bJ8K zn9u52Op5YJJ2@IxBaGE>-n`nCYU-A4!r`{&E6y5y}(x1tA&IeCqwB4_8n9469=M_&^+7^{ zxe-KWcDoBBhj#E&%;wHNWB%LnE0~37ab+`9xMqkSR~h!nz{og9gytl>@c) z_lR?ZE#pMyzfAS6cv>8>ey~jS35Qstm6ZCn&n(S7CF)ww3FRbslymZ-)nh(?AJ#r- z!ASiqluhu`CEb<6+j3|(8U;O6tTZV4@gHcYtkU!}XqTG4Ffn|2dpX~B{!B~~}$46X%oWCNWK^fZS8XVrTw=?(yfglNKbz3b! 
z?0)+{zou}I_Nd1oBJX8;R?Rbs3!Q-cl*Ziw&qwDtfj7{pr)0-cS(n075MjB+Zi=(Z^UoseR-z6^*w)#q#*p-~w$BsED)pTZV`A#JYQjw+jvoAj@F zf@G_OQ;dSl)=K(7(J=u z+o9FoEYdFvjqQmblq~@kpbR*TQ_E|}PQ|PTc-nm* z`H&cX-=bfo&cn%8$WZG#6a5@ImVWA2$5s7eU89!Uhs5s#x;y-9Crv}-`yv6+BUkAQ z$^}y-5+fz%>7%e>-8v5$2mIY$vg>39hDZ_t{rZXzwakWs6n-qG{*>hKqz!Lq%(YcW z$9fX!<$h-{Q6lT;*35Eqh%FT(U$w1caeeBXmlzGBJ}~}r#;{}7%)D8;X<`B58<$gu zX2&nWIQ^-Eb}v;3a^uGe$mk!`ao>Uh7&97e+fQBDp3|H@JFJcqExG%r0X|A$`QHlp z%&+MZa`g%op8P8XW^wt1l6$9?z4ipw=yh;1yD`wUKizGYh&+{f({6z(gib;P38{&9Y@=3&yoTp^%&Bb6*eIXb+{2ohZ?C#`TIz= zLBX*=!02e=o(HGkNY>k#g_L5ggy-8qk;U#o6QRJ=Z}BgKDs$b3+EN>W^Wm|eXI$&?I2ySy1^t_uTSl01kpZjRDRH;5;{;DDtB_jmg_kz31xbO8`mIjM;X{ zBg1s=;W8U!3Y4C#ZltgrS%1vHU0_R7GkS@Ik=6szAQHvoZNvCtr-8&ei)-}SCV9c6 z?GtF3@IlAZ$g5vVYz=muH53=#oSEFZ>-cLYy8E~$u3Nhv)=zi0AqbJ3;Q?asa``q3 zZS}JFGwkl)X5R;%f`$_h;=n%=siM9vM{|l6lx4&8N*b2mB62V$d4OJx8d#$x8!&Er zGd8PUqlqPBCW8A(F04ema5U${^Pcas^#0d7eU}^1^eig5(d;Lfm?R z;FRahM8O~FgA|PFuMiZSGmYS8Jk)AfUbJK?b|GMqM(&KJETK`wYV~;#7#eVZh(sJA z2&1euNNf2n-wBw|%^5MsTuT;~v>#Sp`3CRcRXTUTk<89T&_T<|{FwnbI^5T<)JX58 zEh(n$?v2X=67KT+{77xQ#Nf$&d7bv-YTk3HwZgNzQewe!g}Thh<5fTa^uBi~zO~%f zsX=$342Vn=cm++~`sdq;@EM4pOWV?FjJ}xk!IBvK+-RubFcjXvECp()7W|ORCDP~9 zm(xJs;9SrZ{es|?dD%={F7<^&y@G^}LM0_vN*gIN4ARk2d4{H4OL`Zps*<4iHjF|S zr0);x<~g%&n|dZNp3=PkZk@HxgFG>^Tm|k6{q~9MAy%W z)M=F(y8$ZD04blR9r8NeM~&9WU<0-IGejg8YGmsGG3{C+CqQ~2u9ybtca6gkjBpis zTpcbPB~UJ7{CytoIznRKxhLQZ0EKDm&>g5Xurn__htl-DXa3;YS2)`kr&)!%cmu@4 z$=_!8Pk>^Nc?!o|f11>;{Ve1=6f~E@el;6rPu9-H4J>oEtn8;cTnzDm_T}Ec7=Ofe zDA+`|m_PjRe>20Z>%Nn8$mSzKL(?W6JkR)=uGc8-E_2oQulE_aOK1(rV!QEno2hnM zybe4Ca~cWgvA7TX$y;@+wXc(V#mN#ydHGz9Q-9T)#ip_u1bjxmwZ7k`OWkqu$Mi`$ z%~CL$p_Fd6#=jy& z&P%ReJ0AnqIaw8dgQlYqC%q9) zT}nS}<;&XyKzhBIVWOP0R|;d$kTs8Gk0ka*hpj=H0&QbP*C&f|wbrPG2uX~v)5X&Q z`q)Bze*@`P#v# zN1C$K?MPRdb}{fbBkx6N-eox=i-C%obH3eL$3(T^Q^lkMx2`-6BOYn6tm7N5_^Ho# zsJ#aH#iBRTfyp!nxa6cOTOP91SWjdhDT2`NaS6;WdAwP4YiWE=!Nr70^UbWCh#1{d zH=2)rCy|GU|Ub(G(fADX;>tDfyAJ@<7a zs@>hzxDRZ!6wkUwy6VZG025{QthUx1pBfYxn(5};v^{QdF5iYJw)m&%Y*}JLB9Mdf 
zcZm={{Hsvl;JB;;I`pG!`x-m+Q#}pLWtk?}D2laM502Ys-DIQFCv00N@UhfF$^Dy= zsx`EgaNQ+0C-ZTsn~60wSx*37^DF7sOM|vIqh6U)5`4GdHda2HTkem#FTvk1CcNlf z3^D^99p2>^_oIr!Yyo{7Vo~Bn0|zuIm3L_ycB{nvS=Q)X4q~&opS-VrwowPqeQeqG zRSjHn+i=;iQf?Y`9BnO1a?(C+&OC#`Z-2U8+7gP~Dq@lnGwF_@sHb6|Yp>+POPqG= zPL&NjH1kf*%aYdW`>cNx*>qE( zikYF^oo31OU4|JBzkG*^Nd23G{GF_g-=2S3OL|6!x8+TpIFSIKyNSE^%$pU-);YTr;2+;-1+Di8$+N@CZzF27oZ3`bMkn zU^{3wHSNv--ibmY9S;c)1@Dz^p%aX@(gyLtwR7J)TN9y9mX`5^J&uvbIMh+DATm_u zRB36OEYIh=z1aC6Dv>5pV~(Gui2pE%Dm9fNDkaU*!Z?}VWujEv`j;fQ?t_j%_@k#F zGxNUhtH{G~{SOx1`yvTAQ(#ZMew&k8&v`=5T7!AZ`=3w9o}i((4CVGD47rFB8*q zu3S)aJV_T+9VM=*c9A&ujFvT%J`L95ns$fIFdd5HJV$WM#rwMRMR@<`wSW*&iN7gG zd<2OG<}L@kpb%H0qFLCUya6yoM8QR*yI&|#e#Z86bUBk4;MrFledAq_D?~1j+)L9j z8cR6+GmSoYa{}thA0MGPTR50642oUgF)OnX=(o`IrXF5|q%9WI4BhVkGKwWm*;;QU zS^=G!3TU(43}3g$7qD>&BS}mbtg8?Oo20K_`A@DMo#PxQ z-@I28I%a%bhXfKYGcmD9HksrMg_7~5@%5S*@B+5Tu~KV@;Ch$qSq{hk@SzD^PThLp z$k4Fg1@op0tL?YWTlHgzEc|4%&6(6({2qRT6m8G!!jGci+q#Vg={r#WO!a`ClCCMu zp>$j@CXmobsE9d(g=NKiorULNib>6$h2j}HT@<0|W_rMRJofn()UFw6FusFMGRW_` z(ysUr3Bx_T>X|SS1RoTv^9Zman==-vPpe((Ih)@g)F^YY3WZaQAmzVaW^Ekh-=>481=I54`}L#Hx#JMjL-V zntgu;>4-{HTvO2|AtU$qe1dk&(Cy}Umh96q{b}WA->PDc;l)LTf08Z@vM)r52wxD_ z)dZpYwqv{T~pU}uSOZM$dapiM2 z$|Xp+`FOE>W9y&(`o@_}Ue9}ur+%mzX^(e1D-WkEZpPrm1mz_Mm*SSJ(c;`%j_>or zH+EU6lvhs{3cG6Sh$k{B!*W8yy7GVqU>1Wag(6KrAeiK|jK|RTYnyZJjBrn1J=st} z^^>I07Y|9Jx}}*0h~g9;(uG5$`uruo9{U-KA}0;s9*`QJ&89UUR*;s}9oNs~j;Tvo+u*6)^ZD%AzkxxK=-;0DL=`_7Z&06l{bz+x#A6vIZNKsr6cl>0 ztfZK_KO}F^!O!h-$oe$J;1A?J^LMo`kh9`6-Z4G&^CvKsPN<6gm;r&#Asa)_iyqiK zixQV-A>_)z#DR{16G7W7Klq(Fe3vg0ZCy-Bv%WoSEf&Hs>c?n;r=s5R@Fvj|8E9@% zh(}}_8k97n4ZP5Yh%Utu^xQ6+t|Uv;ekEDoCyYD*tZi#BNoX^V3Z8Flu?lk0wkLA9 zFn}=nEcYg|j82~EOEOj24{WlXqcZMnc6<+$T5|dH9>{~Jt%K+D=Pi|4%aiD$ti&6SA=~vZsmKA#0T2!?TuIcyf`bYBO(c(MmOg6snh8DJISQb#b%=5 zx8e}cG~n9}loN&S;R7I#Jt|L2D4Xyn6N}*~T)@lP>3e7N+XITFS;FESO#S&J&Ardt zzG|JYJwa_4SF*Fn0bQXNHFyTf~_}@Qy=oCjIvW;>_Q+fgo%$ts`PoA`* zOeSr0Vg#aV-#)`3bafmZSi&Dnxk7JN2Zi_99}}f?S!KQ 
zt-Z;R`hnV6w4lYB4hxhuWnB6G&Rx^-o1VB@Za4lrX*3gB&86|gUzsh6#MsyZ2dOkz zU4;0Ijvj>ozB{;$_4Gyn#?5UXs;_T?%L!fK2Vz&~Jvj|cj)lTof>9C7`gA1@qh!vI zXp2@@H&(-p;VdoJnY5LaBT+Pyn<$-DiTY+pZnwfhM!QvZBdvz34*(!FM1o1G2&C9a z%@Gi`W04(y&_SAd^bd7fi=hFYFKhJ9et+G;yUtH-0=OorjRaW;99bWhI+wj~R-Auf zodjdqFVYC|i3lYov>M+q%dhH74)mela=1)|-;6$mb!QPI^l`LG$o$dPVI_^WZ7YDY z_s3H#zJd)aQJUBC1~z(>V6E7)6gvnsivK!`LxCyBf)DmHQz851aaR$0C($)Q;^@Zm zh%Vk@k25KjM8K;O^(}*GC%3>!_`X3^)z$bs`s7y;hy}yL=J5l6W4O}o>0fu`z1V5W zx5B@=Bb8XgyJi2q`h7|2fimhu2KT&flF%Wh*=|MaWVwczvcs zkQgs1HnIbdHi=Tx*^w>{L+4hqZd!j*p}gTf%iznkN9& zrV5w&V;mzY<2d4S;EsR$J9`e3Z$Dx9hc781eP-=W62Ja1G8Ag5TnG(>aNbem{p96H zBS$qOQ^G-8LC0eM0hX!z(>_50cZ9Xb)bP7ewpC6ds1Sz6lDc0wl9R?G_ohRog9dr8u@8*VH869Q&ei zz&I4HDzJjqeJX^gd=_U@)~0(>Kz|KR%Fr&=!L+K7i*QxqtIoabMYLx(SxZ_{#7E>ZR0V-gZ!aT?e7xKwgyaj zd!I_W6rzHqmwD7P=rku)KepQjy5w3(N)Rxj#i-K=)?2v2rV}HNNNAE67ok+jPKL4D zFyug8Y1J3_+1{--@Ihx%H^F5%9&VwajtG~!I=y2ktWnx&ONkyRo0U8f`W>zsuZ*vA zms~#WvOT~lRQ7Sbm5y&(HhtE8CQ_sB{rsr#aA~1leL{P-#-pkB15Le5>E8l)lfV4t zx-ck1rbar8Q)D4x^#=W^AN%tj=a9QZIWlLvl;6_v6uIxlH}hqZ3cM>48WoLLOD0V? zUz+8+YRU?|GPgi8?{%ZyBt79nQM^N+${mx&t&|h5uH>u0BwWh;&ijMlJp-0!ekD$P z(y;Xx+>AOUqpWZ2Azn^j(uqa4n=cwTq?~$Rrm6eLAHTM_jYED8>z3h_=c3q}b~O^q zCH_<_w!psLQJ=G2xm{G)Gh!h=$`wn6%A6D`G~FSux_Xnd{X0Aw&48@cIe(vs`@33M zdn;j&drHW8Va6^LUfl-Q@KmwT&~IIq>IP@pT6hyz>-u4LZPr+Llu6R0_oS$JH+}0_ z+p%EF&NM_i0 zPr3?SQ)BXCg4@^7P)FLYev~_Lng3{~Q2$ul{oBDjt(}L1BK`&bx5PY+QqyE=Xo8~g z5&me`dH7|f9bE~IQHt3N<-{~u_@Cv3$v;qJv%ekPH1L>I|B7Crr`2SWkY|GT2F2Dx z&~WAJri(@bUXWZR#Hmpp$=cDvzY&DByE+#aY_{;~{Kq?y%8uL3$TOjm6;>1zb&v?G z2FyGaVz5~!^qmPHjw$+`-}E4}sskO}se;KHLDiz}`9`!(h`Vi6`@`Jr%8(PR3ka=<*l-z%_>wI!) 
zXOw*EhbQe}d+WSp(;OW=hpKczi)fwJ)ozkSI8(OB zM<-iZxy8=U@_&N1W+5F?z5*tZk~QP8b&}RCH{Iy04z22sbsrGU`~;0{TTQKc-`L@V z{QL@{st*v3ZnR~F{gAo{^=E zj*Udgm?W<&c@@`Xnk^JJ6%Hwxeg$`8z}d1=970rzfV8LZ0BBcQ%P3b6lL{G02X2sJ zVPIbr!JDv`kH{-CAQ@8qkTjWcg-agUGDB>Tf(oopiM{uqZ=rFW+vp`J-jVP0A4&%nW zRXAv&*L4Y9z#|kx(jRVqmI;y)0zMxdqY{|zIFy7VNxXeL!3#8>DRMv>H3avnw}H&U@+tSV5yX`!V)Gy#;hSPw5B7s7j0UC;CtakRpwNlx#dELxlYF&v$(Ve$BzDsPKP(jZ7YfAW^GaD({~!2>F;OA1oK$=ycK- zx&MBN%%2I-T-me?C-}?%j=cjmMJDMFatvTP|NU4URjycCk@jo3fBzErN{JGpfptW> z^kx74^WbCu_l5p%hW_sz`e(uX-zxk6aUWXmj3O2;;>vf$2Dm~fSt%vSDslaQ{}1ac Bg^BZIxghH{qYVlY*)eGp`iD|!p`s+~aJm%|}u{z}t zo|ZzqC%AVn5rW3rN5wBubs*%fMB;B?*yQD<@zRR#8yVXP+tvCPT$&8~VyzK#}J3`-i0|*#0%=$Aywx!$;GP3+`xUtLz!t zarkd|o93#Py_pbwLz|*=AJ+AJ&EUlo5g4{!9pMi&tMb9yS%(fS_Jd`s++8PKk|h;{ z5!+p(K4}$hPiCecwYo2U36EUg;y&goc`{&%t)qa)P1|QfTP4SLzK~??#XXJMNx^C_ z9*q5Bj!BIKlkj<(v4k|{BU7{|CA6)_dt~lBSHyDjBU%Zunu4fzZYbskjXL&_hJQ$2UVs!kzffvS!#zQH*_w~p7b%Bu+DZ>+j#T<*#j>u_OFyxvR z+VYml$_UKhGZX| zl$Qs8tD8NwuyAy-c5=OY$h!q@HD#lr?W(P;BxvU3z;0shWNN|g>EH~%1VPAC5PWp7 za5aH>I@mk92zm-r|8<5S_zXYHK@I!s6jwW8YHejzn55HF3)p>jPIgXe5p);~CiK+Y zQcz7w=AX;Kzl5o+U0t08IXFB#JlH+%u{%Ar;@}bx5a8hC=HTXL181uZgLXo2xK2HT*&U{rS6{7M?c0p5*BAPq#n^IpE)L zaItf8{C96~sSy0ApsJ0hg}siHjRP1S@C*?iE>}b zoBzG?v5Uo1Nhb&JNLP_xGxN{Izkm79g+d(gp8qxye-HCtN5MFYpbK&QH)kT~vy0SV z-H0K`ONncEBJR9In^T@V@5#ZWI`l-NOUMYpeDz7S(78#UF5$8{>;((arly8kM*oC# z-(dWku}-(06QKiu$rX_DjebKVN4_4w@lo`4mOUFETXcjbrd zW6bRm#bWt%xd6x?KW%vsZvHxbR${bxgg<^5kqC7?Y5wOReyO?zi@=+`Aj3-aXR|XAc-dMU{WP=x z81OmqfbF`*f|uF~f81oB4ly(QtY${X=8wb9IN)&8>vCfT@&`eloiZ>yHO|!pCWtmx)E{k8zrZkCWbR@9ZCYAz%YOPWnmzduwteef_Zq4!xOw zp3Q*f{6t>mJ`0QV|K9tM)9EdPo#Y>fvuVI1U6lLKfAttF+kgi6BQ?Ei?*G{vCM4Ic z6AhOd?mrI;BRO{suYRztmZ4q5LVDo76OTo9|C8Hi0|s8(iG7(@+8Dp)EFu9cRxYHK zC0gU#f&P~(l-y?Bn8oU^V|TtZsATdvvG>GLHf$ff$Nn{7{9y<@Y+A1r*wgN+Qxk_k zlxPFU=3*L7T5qnLG6da9243)Sh^rXW9*kP!d+{?u>kgk|7LUU$&mdO}8DHw=`0IY~J8r*$^Q-0i zf{023+|SZK>t!5A$t{lLN#dQDZ7g+$7f=v<4{+HS>J337SRHz>m;Ca9N1rUEFYPPE 
zG;EJXZbZFW%b3;S9MM6pCYKH3)wkxoi9BDwWv#kcHC!@<{-CNZ4elmf~}Kx`}$-x33f){DjupxioQ>yP(<-L1 zr#OCnCqGrDgZw$-NczDStKn?nK@(l;(Tnk&)!zq>6EQuUYqdI@Ed4QtO0HNS5>or( zxQb}XgYS(r&ai<>T+`F@?MkgLxuiBTHLsG{^zJ(=z2S)1=QaO?JJa~2;Am$?9Tk%V z=GkVdEa)s2hH0}w;CFN7kwWBS_FI~1Q-N@b zH1b&GXl1%@70~XS%zHa68bjHRwsR91T# za*}UR`lx0E@6LaC(Di7PPrM(Qt$(yYcKj}8AQbH)<5zNQbApV`=AM@SEUT(bJl#>L4Ho_FLAM}`b*H%rIv+ANfs`>uJ@kTa4t2kCi-TS(~F=t7~(FK8VmI&RVn^&zHvNeZl9n zZ|TSIE72O^HZr>zyZ7GawV?BrML?1({6e;!eR($QpBjcm#+*Y`R**=iNUT$B8Y*<~ ziAKSAbFRsyjY-rnAVTiG=f3ewtyP@Rk1m3-H;)P;bn*uFn~K$5MP4r4T;xby&e(=-2)}=7AiMNYK*DGOtUxm=U}A@yT08_8@_!t@rj1Wbl*l3Wzb|a} zTXX{#e22wTNur{&eYa*#P5hr01+1G~J9tlJ{c%`dJU}9fCG&YG?XQ^xBYT6NJF>h` zZ#DQQR3QRur!hZ6TkhB311Po6fXL3iIqYs#{^!}ekjBPByUa)wy8pTNr`3g$+I2q| zLT}dd-&)i574mI-`TI|^O&Z*QtAf#1A{I@#wZ4=!uph&QxiUS3dh+guV%#Q9W=TDy zSzKiQ)e2{}z*4naQfc~n&xo@iO0fNYj#kFNwJ%PsDM2V0nehbzI=T8FBPB~m5et;) zuTR7_{YseuTu8WDZ)~`_BW1mk+d$AyX{#}AN9sXC#m()GCN1bYx!7y(TsBr7ZqO%#TY}Kh4g&e{tY;{ibR&R`^`i`H49}wd#K>ikKjv zD0JO~Skm(65Zy?vl}c}pXe&IX4zaD?DEwTG;wW))WGr1VsR{V+nhJP+c2n*~dLfP= z(RQ%bZ?V)7YH4oeMKMw%_GRq7m&@j8LHOF|6_nq0ITIw;#^jUP>pIka6S6>#)=M?Z z!A!dCsq)(0%;$!T>P1T2PV+*=n&p`q<+_iJ?|1(OL%_u35Zmfrj*OL=1+--gdl$Oz z&Mw!XmR-vWpJX%$W>fv5HU}wsz6-qdy>!*eIO;pB_-II z$sOVI=iS@ld;F{|rXESpKU)mEWxhU;p_VU&rbQrLcuz`5a`m1Jp^KY(&HjH+K!tLy zRwK#@b&){?UBAKHwx^DxIpjb%cQMK^u~}JQjYjPb!N2!KK=XW#FxdHi zBC{+CrZx@WiN)%tgqBx`4vI517NK|_eY`QMmj7w#A`=qSOG!EEb>_rEO2z|^ib%ry zVDTCSixu)nDVa%e87-ie{o7>EFa+YX!lIA&1~>O2jRWT2x!KKBn}wSXXMbJbRI{r@ z#$(m45`%98ZwkmSaBK*NsUIAA5^v#+AiK6op<|Rh>#AJE$HVb}(}93WsBuwd?=vJ4 z{d+o+uq56YZY}((a0F_L#})cBT3{(9O0V)r5lYg&1#>T7q~tRfQ?v5dP%la&-Bs25 zQbLGXzNVN%Yc;}Dr0XDSdliwh zLd-ina_stzFtEUq;Mljyq;vK3$I8LH_nth+zUF)LAGrOmeaTXV!rp&dyGQ^p1W>UtO3gNckdWlIb{yyJ14)6Raj#YB3~GWrq_g%p6QOMEL^ zuis%(is;vv_fkto5+;b=oDJJPxcNnW0=_~>ga;g#-x~(BMc~m)1ZiPg-PE{Hi zq0xEFJqg$M`056b4NVHeRC~M8=L&;YfOn`BDUlXGE=?W#r0>1^mdhxO;qOVOjSDcM z(c7J_mb`C2ojdOYC{#o-qA_s{9r6Cq``!94v`+Ky@rF-WRw7iDX60pllEk=QoI=G& 
z#2ErIdJ@`mKF?bkVeiD^cDIvKIzje%&S|ArR+j5g8hA2=CYW?>V> z>axlmyiW7_8A4=Chg*}SYb_VwlY(#I^c{aTs$|6nIm2RAgTf3EQE*ugAw{QK2Il>9 zhf0OtZEJ$akJam7HQ}DsP>4`ASquTAc2(|yiOuJVmpt~= zQ~BX%rv2AAgTZ40cz^tp zm2M`s+XKCh?>6a~G|RLXh5V@gPB4Mm(98W!oC7e}{ebU~_uFuhN1bSb9FJ@|glzoTl|my9xI9hhdnc`Sh1TacdqIP_c0U_AWdR6~rM^<$ABF zwaG<%w57}X(l~N5u>_tSy*bBfjRae>SO+Ve)nTEaYNh`{5=ZQ=i|gjMMHPavBF=e7QxFs5cxcP#g75*zY6nl6+zlC(*Nal5|kIb`Dx{$2(h3=>HC%dw2n8`v3z?ijm(ri6wtfqOkk{ zal`HOY1<89;8WZ#6sn`VP`*rDTgp+E1lb%B-@>PBeKIOB+x*G5B}jrUAPh?Hsl;Ju zX^VB0<1Cf$sX5I=ofiYzM8m|bc}OorxlnohdS;A73=<%Phy$sr#`2ezVpiTaS0|;n zAHKNnfIl2B@b@P53~B?BfrAsTn|9JF$CvmG!PM%z^Q}^3M3<_f+dMyTq0zLb^7n1V zq~`rD9M&iWEcyuhQHYZ|+VlLc1;BMs{km^`Vz~4hodnH-Ko`HqTcMV_XlUu%t1& zG(BAl;!^|^97QNSQ4-+CIl zH(U-)mn`H?H_{k@11aCXHe)Cyu3#-AVFYTIL#kA{>sX=R39^i5!$Pi>)BsSLtT*$lP9ue z&8Qn!&EMZCmV4w6)ysXGTdaF`O9`f; zB{iz&KX`8|Un(IXR*WY?zWKGR0@R;R9p2_n%u;*rzXYePf^ctgR$O>u5|_Ra=w7zQ z6)1#d&^VOPY9+)=1Kb1c@OGRO**XVYecWP9-P`=9W`Xk7(q(dQ>|Q^7S*Vc0%*=hq z$KhW>euvgqIm9E3>8^yxHiQ@|7a&a!L?`yjmh+NGik}neDw~oLelliv4`>~ElmOK< z(GcQ%gcLCw80$2U&Xvl>m|{)p<>!iIKBVs1`oQtmDvAJ7 zX@n4m1-V+1RcpYJJOa`LlD0o(eD&LCCV=fee{P~t(*~5n0_-uwa1kTqAJ%K6ctZtP z{kC8!ct#@Jnwa8aAl2(3$t2DU*XjBES)lVr3y77>YU#0J%XTn|3ZYpKkbD)yq?>=O zp7?+a2n<)vnk$L%Pz~Y=V7Aa4pu2_3$Vk>!U3Ib{{OIf=*3Oy<|DkR(h>q5fIZh!7_m*)^Q+1)Ae62FyM?Ft=wjLuZ>hI9C214O@FrbYJ60*x$#|4F{{lXk``pL(()lL%N(gsF&~r)JteW5Ws;cQA6#nG|KB z4%F>>w6tWBqTVDn@q8LGf^X@Re*Fh0MbxuD@v=JwaQ*e~b#m^C{&r%SX4L2`e&@bDV~+3742QMq zR*NeaX`l{_PZdqN0=ZYM+1=sn=NG3Lmn0I9#^aVcZf^uvX(x8Zt0x@4iid#QgE-S^^c zz2aUqR>KwaI(iL`Y$TnWO=*AypVYtHrIURf@_2|#?-7s=zJSE?E(gJK0oMi_Z^K!X z>+jw)=7@!3lNSP3HCAV%E~|-}WJ%I&JQlr2A$$MPghcz@cnL)fB_T0_k(c)=JBE%QyiZ~`T(kdnNfu#9j?Zy>IWMu_*(@`nBgv1vro(pj6GC}82E+w(KnDHF zM7He_17%|FFfFZ}ZR9k-JS_j#>x_93AzGUYt*z+wLAfkSw8lh;*1DeII?|sd&+^qJ ztXI4a*S+3omMPSrE&7!JEe0f!fT4t5M@+*U_#!c-4agPRClFVqKxgIZSXtsC$StC? 
zG{1ge0>p>0#wTVn=!Zt*U)0RVA7KWPe`Bz~cxo8(&+5{bMS$<@riz&zmMRL66svs) zt~}|&Mt+)=)(-tc{&C3wIPdB1=aZ#*he~WNQHLe<)tDNw6e`vkbb*<$NizA7xo_a# z1v{q*5HrCS?0R>lCA39Lvjcj>`HO@vDJOm)Br3vL=0jR%fHQ&Yyh0?6(Qp-s+|;gt z;{MR1dzLVLA{)-{Z#_ymqOupfxxs6TP{t zHI@$t4DA!#P&phH)xw}kW6^pDoCcb!^W!9Li@rC$07N7%_+2Q&WpMxN!<@-Xv)+>M zg^H*XrcmP6w}RV$wdwD!a^VM@Rou=GIR@kv_wp4`>>|a0jP}ZQvINNlRpdS33&|iE zDAwE06h1rN8vEew0pDZMWPA@_rU|%MHm#-Vtpaw*g}=J=92ZOj-ZP?}?tAlWZ|y!453EWeCvg~w0D2{pYbzUj_t6EjW*OthsM~2k$TR?;nGUG^7))J^=qH)N%sq4R}h# z7<-neKUm<$nqC0Wri=vPi(Va#D;SgVJLleG#@kGK9^x(2Dp#J zq+X<-u>G8XVRgFN3~o!ka@(AJTPpN(+Dz+i57rc(@e$|tG`H2ip%B>XJc!mxi=~0$ zLTZ6XD3DpALeN8W9<2S%5i|^>I1qGhTz5AJRD`GiN~tMnc5ih0QeS>x7Pt`kFvsWM zdDa7uhrqs}{cPaz@^aV7U;Pegl}Sw+u$*_+2220m-*6lrPyhw$ld!p$F?;HK9GyzG z(9@qgI`Z=J37DjF4^FPqS}&yqmL*k3(ddlM4vfr#@ zEG`UlZTDaRYQQO}9GhM};pG+ZvJ|!pJ$$Z?KX0)>1f;OdYH|X+rOhvNenkGW_dwEK zBn7T1C6{SPo6$;n!+c_n-?_YR2Q4POG08}0xY@Unyo2=@PH-(Hd2!C5K?Z-;b zW0J+v%9dFJ6cw#m9MMZv)wCLm{*pH)o9>6NCDFvlRBPq!@T zCD?b=E>Zk5vk{>{STxQ1X!Z*`|5eLiPluq45lXC=aA^9gdw&Ul)wls`NTI3P5nkW4 zrI6u!%IES&&(ouCk0g07wn{5CAb66u4q^EFjDZXOHwGtu0Lv6KJmvsQ2;V!*w+ zPJ@ikF?oIPh2s2suUBfX6m03VrA8xCVT*G_`kM*&5sEmm8ut2|&K)-#JwcAS8EZ3C}M#U<$`&_~8wxuCCBSuhb(NCPgX%YnBy(9=< zZ|GyWDmn$Z4IlF0)K{dHNWJLsW~IQPla*?q z?tAl|2$OEz8=v#d*V%hj1zbyh@TD6egK*^xltc67uIGJHLbE^S{OJ&t%*7=qK^ARq zX4DPJ{n3Vu!zg|W!-HM7PR?fU2X-?M+D!59Dkd{hDZjW+#Jd0dqgeGKLme+XAOor} zI-ZI^sGI-QhJH7E{DD%9bJBwE$=3rQMQN3@N9n|ZDujFMrDZy`GP(e}Am74HX2|xG z5IO%w zzPvuC&S#)=kXi*W)+umpP8egdT=_Awk?7PynP6w3BS7|jV^S)gUJa8WVTBN#;(6@N8=#R2KRb$$my^%2 zI$sz$5<6}0g4Kzh9+vD&+O@(M_!!lX|2-@X7UoGrwLu`)E)qiR~$$-5h_Cv zl0~na`3a<|@`)(U&7jE7)*Z}M=~Qw#u)4MZ)F)Ljw3s|xr-jHZc)HiZs4XyHwWJz; zcvc+y0ceFmRtT-5y?e876@#t{s1s)2xs|$ZO~|`l;`(vo1-$=s%+jFl{Evk)kC!a$`2hm7)28EaaGm-m;`K_xY` z)ngz|_DkXj6MjOp7UJkA7^dy6!r8%E3W7;P&U0eJ z+^>ORyjDYZdA+PX1WeaRc0Hq2nCheo%sZ|Z#mO8k`71w{YN@Q*o2Pn#q5}m*x*OZ>ooOMn*U=yWL70vH{ro{z=~xV&2Z z=LpYRE>e&fIocTV2b#(VB)!P7C!7z={@VWy1OX~2N_{5z-+#835_~ZQ*+}6F(ya?T 
z8X3mg>Z3i;=6M-j2f(z1zr16EE_T|A_=hWbck%NM48OU=IwS6EfmH73E75V?>J3y;U2c?b8U8 z+nZ}rx4BLAt<#H8LZw2#Ny317>pparu)e@_84+X|3hIDfwem`*(4S4OA>;{+(g?ctySF|oU^GK1~C+~d*FA>$v5Q&M>!ms5i|1Dwl9vcw+*G8(l%YOLyTFV}sG znTE7(0Oi75+5k!8DZoU<$2!LI7PKbycf8tGl^8+{!_tjvb*Q(YBNeBUV~N0arA?0~uO>0OlE*5UG zou~KXQR1q83}oC35`{~y9{6IEtFJ|mQW_Tgn%UgkH^>vTi%tCA0uK7tUh0jQ?DI9p z`5D|I+(81%AV(s7#=3M5(U1YOAZeh(BUSQrM z#ayiT@KZMh=Wd(CSt(>ACXkYt<{1znlY3`?VVTl|DZ#J+GJBw8UVGqPWHuenj@(s{ z?`M4%<$ZcThQN4;bL&#eGoZ?+xI^e8fF`0PA$0k=>WxhqS}>LnU%GJDtQ6EX@NFcn z!9d>;D90(W!d~j;1A=(V%?jlW^ekFi2YVeV9sOmAqMV^kbBITd`p(I$Fyh*zN-w=y)Z?5 zd@8z0${5n$9UYgkeId3RGa1OmwODvtmgjz~qc(9YL`WfE_!TeKHeUtFQKt%IkCGuX zMB3H@syEUI6RJs?&q(Nt_63;;kGpZf38z$?b&Ip+d1&fIti?r`(>;LX!o1-e>r`J? za$ZHhx`l%HN!P5Gqno{~IaecN+d&Uj5C;8*4Ta@kZNL8Sz)naGp(PXQvZEP@q8t>; zxTGcMILEJ5hZ|yQtE*(oEIP>96&Y=BO1ECIGSjm3CF*aaC`XJak~S58f}xeJdAzr< zpt`DMC-)+aq&sKN9&SU+zdIQ=;6C{$=&fAcp$A)s87?S+<)&^PGFv4-6 zfz9%+gI{1D0gd)0LVl%bplO4^yev{UuYNo((ReQ!oakmuNccU9>JE;wU(Q?4b^*xs zOXQ#J5!7-}2z~p|l}E`=ZZ?NSXmBUl|7k7anrs0Vda$4)Wr?1K- z&2<2(cdfAi_G^0RM;;s(+wCg--!X11j1%PM&5^udnm$u$DpagHv>>t7Q`@N@6ndpI zk#JZoyS=rhe)EYQ@7a)O+Adj3%qMywvhP%IIm*Qb%ZtP1qeLRSHU<1@BM&1s*&gyS zaPZ43Mk!|l6+^}$kN@f~1(E9Hm? 
zuPfyM&lFx5X(}?T$Y8v73hG9h-YMKx$w{g8_S5tijlGc4ByyrDRlMYlS$qnZVn5&z zclA_caPAe`ye}hGCd8YeRT!DssP*k^z?p%It+es1NPeE|9Kat=VbMzUnD>&kohmC< z%@Ow2bZlHg)u}Om4od!}aIfLc0h9B5|B0XGx*%kde-zfqrrr?SD!OZ-+;4Wwaie){pb!GX^lmKZu4uD ziWS#;jY^Mf_7Q#!01QzbUe@~w05tQt#wS~quA5^;t7KVdAu+(~Mu8CT^k?#xPP};} zS)`Of&GF$#ZGVZXHAlcj1{iz^^l|aGp82f6O=yE=_xdS(r2cnXaq>U=`1-Io-0?d* z1f$TDi3VSE7eAMX>eO8aTqG9lb@&X* zG(UGj068SfwN;>))xBzi7fgBuZGg^({Y;HKsN?v^~=W>8&xUGdMPs5FNP*-$AB(Mhj7r>dUBRzBq$|lGp;3yNo@5A?)c)Tl&~LMsX;eR7HCt~|rK0Jg zv^OId{46yw&}Ra{|G?+$za#>X>M(xGFg{z92qit9CxzGEgB?q=Cvt75NN_$C1qD{A zc6tAgZv}D8vhJs0xlRG}Q*!43Ey5+eG&r!$xjteD?9~J^b8<#%!XbgcIw)|U;+4SF zeV8poIx32c*OOb^baQ<{z!YRP^zxonY%+)c%ue5s!kU9~)GiXnRvHj@W|}=JUq0q_ zWmAd;g~U`EQxqZCRiG-38cTgRW>b*}KEjrwK$UPX_kqM$BJ`pjQ zyu^^&k~p0PQdcjaEmUCi8z$%O5O9)m>kD*A`WG=+bxfMppB&_3SuaN5y#4@iQ9qchJw|2K#22w?D)9)0-77tDX@K?6r6;+)R<~}5WpklN zDx)uy#)`&BWQ<}C)I^Q5pl!Yv3G&6kb zj1kdoh&U=D8c+u5Mk4rZC*<}H2Cb@1KiZeG=Ns3GaO}-`p47!y=CCWwP0~ls;>so&ri#nIJ$obJG4XALy z^opG!TZ#)}wAdub>Z@K?XbgE?c!cIB#CWp5)Hx1X>Le8#46~LA5_{@+!@*oSuJ^NV zujgU!p6o+N#~n(&gP5$Fvxr&$gYCUSshe0^U!r>z4zxJdE5PitSXI@F6My53&Q?!- zvtkN;^({>N1v?jnXyB)DHM88}?7Y_A1_K%iU4VBhU~yZ1PlFN4Lfs)4+I7}=q3#7( zgDY}vp%7ziL6Z*1%v-y(gL|Q*k9!m89xKE3lrx}}MdM z-h3wrHI5GTleQ3N zOQW-eJ<0HZp*oOUN z&5ktG;bQDSg*;&)hVN8}9Jpk=Zu7eBMeTJi*svkg^ry z-^=&bO-y=}3wD`TkL4zoV9MSBipLqlaVRx^Y?+BrUpn^fM{ z&mLSXd+tb_3z0lWfyfV(9@X6wR4R!d&@Wv9x;M9J7xb;O8Q|M&l9ATdBlYm|A?93! zOZ9Sv(MDZ2gVICk*0_3{SZA-LG_#s_tH^3^9DJGXEisiw8QWcxth}|K8A~9DBGk42 zKqNKD-Rw0|^u)v+YC`FK^evB1=Kzfgr&g#Pt;oi4Ocf>V>+r%To+BuY*hg}9$shT? 
zK4mr|FQx(Bx|F~m{V3a3DuHns*Lt@8f~ETb9GXPWlpE0Uw4}=Iuqv^4D2=gFW_Kwh z{j^7Cv+>eC?`H|VFBZfZxF5-xA?}(aMR4#1$P27`^-7_$UjVp!Iu^z(sDVC@gNPa% z3W}|AISJ5Hk?Zru!!ts)2GDo$XURugS&`5@V*|8qeeQK+tb8iHNFtT!dJ*zs^a8m> z87i^V1=w4W=8xH2tm!(ZAz*4PCw>A_e|ZR@6fK;N>PBF6+|tYrWvj-^Ax&Naf~4lz zK(_FM_&CG%>7w;D3u@c|bxO_DpK7ZDwV^+ur;WfMZ8JwQB-eit&9uJreqgfS4a3no zx;WS7$?~ktNUWMQnj>3Oi=={V9W4jeEKi9iYT>gfq#t`gu`jtE;6CXvehPFN?q^5V zqi1G69?CozXS3Y1$`304@G554_MM}lPlXvxdx3nL0pyY!;cfF{@Qhx1b<6rgP?`UD zRj5??W`)fq%u{96s~c);j2wtE3#~<6@2;>PeiXqNCZ`GnNlqz+k^59Z=sqU6fhb$D zWjv1bx2>&=D^Pja^M$CU_^*vKZw+E6`9hpUVM?6eaTsR1=@pPp%CV7$yjdP)3jO7C zWhv&uD-qgNH&REk5+}hH})!VH&14`{(qb%2g74Lerwecf@ z1JZbro-A_i5(3QKMMy99a%_lDq=$Od?s0omM^`5;d_k1KBChm#$Fajrz z*M?IOkxafBlGM1&aUcbk(V&fVHqep(VaGmuGxNuPZPjMMZCw!^;( zzcUmuJzc9z`%%`r{e04NT1MbZkRJ+tFPa@*H5Hu{! zIk!Yh$%`eiZ6uShUVS9LNufo<0||Z?mp#;V8KfbaMT;0QZX55HEJ|}r9NUwOrko2# zb=C>R^cR-%ajso_NtUXHe<Ch)V6u8gF30@=+~?K6uO+aA%@e;_*oN>bpu^*wISK zk9LV>%DU2uW&s^z{8LKAN}P(M$q~QB$UtH06#7ovu0D6SD6w@?WsCt|?7RFh@00J3 zEyc9E*frpq&T4;}%03!fz|5mpmqOOaPIIRqkGU%s@>qAJau_}I4^hQYjQ&ER%<_YC zk&Aj`3E-0_xci-pg4nERIm@>y(v!1A{ad2%3UHekKJ075>PRo;Wg~&L%bE^l3Bcv> zcIpt$!GHonNTRbm#{GSV=^6_~V}m$Qq61aR#K$we6tQ-B(U!l5clyT;ut}l=OjnStKh8ViDj#TxTZV$Y#ojmXk)8 zXrC%KptHUk^1Ht|j=6j_Vfo0D;-xJ%iPP6*VVq&)d{i~u*fiKM4dOQR)Wq3={KLM$ zMJNlaEv~*bD9KndX*7?EC0R#Ja$*Y7wUr)+ z%J&D6k=fFLYjIPuIYAMojXD2%>5>TB;~Q=`2L)+l4n~0Zve8tI`7w4FtA%4165s>j2L|x@3v0pl4s)3 z+*I7jNSFM++4sGApL4gFwl>yK_lMnSy~Kn3hb)?9oQdU?afmcR1%4k+Vt86wa_07< zNs4je;JI0jeBgDg->DS}GRGXH%(nBl5vpTsOy2uEnX)ypoez2)%swY4Co9H|*NHVJ zqN^3p=Po(-om0$)RfFnTP*5uRn0>dW+G#5_ zlv;f{(ffmJ85QWtiVMMg5faH5MN zm=DnUTaK0@!;Ae4%eV`=6d8Sk+G47{!`}t`iPy&8{yc&3tB~s3%H!qc#1}|s-#ik? 
zP^LA?lg%S5pAU#CR;HOuDt?W|m)s7Xo@*3eGQR*XIG(zzA{Z3_`9aXI6B77JzN8Ha zZ|!wa)2iv#-p-N+t2iIp-9rF4DyClk@P;nHSefMz@1BhOT*X(SKk$I!;2z{k8#Xu^ z%nf6qN#*#cO|oJdXVij{(kO>wVmdI50rkOH=_cc=|u*!1ahukIStE!SP+ ziARmxY9EdjiCCYnCgHgIz+DNg^dUuOhu|BaZ*Pgn@kFFFL1Na{p|U>Z9|Df}K(TS7 z(Rz1@{rsb6Qyp=&gN;8xxy-I7TL`iC$kw4_Ki9<6(`=t8lOd;(`l*zIII0t!d%BpSgdR^)jGs9F5%UCmaDZGja!rw{D=>HE>3 zpC=!dz^yt9Z;#LZ{l5StO!XtU@aNMW%6;4WG4$lcxG%rEd<;ccc=s+&EV2H;9n}y! z#@F_rgUMefGiV45;A_Y8=A9zbN2O|rL6?H)as0?8wt04;0l?4pTAFb$Ij*J8a zsY4G{%iA4jabF^2lk-WdP59#GoPBd38^+7zIM+O??p1hpx%~5qJ7;SlNG1`t8*V2O z>nD1-%zngkST{!;R0@>7C*8BMG4xgCq`}dpRP={hCye3BHHRm+nw(H+`j#a zVUGu#cA7!vW<|H2J~;DrEJMliPX0d3g=Vp~s{Rbg(?hH|=($YgTc@xpcQfMe0bdtD z0ddN`^w6|wwryhuz3Dh_UMgD+TiK22s`BUORUvi_ApGhbP(4$7XK$|Q}TMVC+ZJ69Xgo^{O&h?2F9GSx5r^-rhr#E!u<1MWXZMZ zo%(CMHEBq3cMKWEIgI`gtxcQ~Pt#*(Ixp*JN!EOg1n-2#ClwGaI$pae4RNFQ>kZb; zBSGQFqZlKe4|Re+M(YtJNW631%$J~kk&r`hm=3(xk^+K$%apdDcgsuRBc>KsD{@6Q zWO6iW>7NBf81}n!O@Q~RR&6Q`e*+EZYMRGUy>~BjDLWP2fg8THw$b1{k9LC6zle?G zd4G;V{*f%9;$+SF_bbcK24PRw>GNQX)=Mae4xC<2G9A7V@9*&17UoMz7>iTJ%5!qW zRJSufE}*Ti97n3Ux;XLB=NG%@NU<7t4$LS;n29&r<6^as3EN}ZOjY0RckM`n!@@Ha z?_xJaM>kDiG1zIncaS1fwLk?1yx^zcTr`qm7{Mt5&tf5r|($pS(TBX|`;b^EBRW3ap@U)@bo+pgh2 zx~aVphy}5RukQHr5>JHo>T>gnIXYUUe)WdD=l^t)h`V0&0GmLIMXN$%+sR?3Mp_xO z;1bu;wWIMVsIlB)s5j%rOL_x;+x>B?eQ6O&u;H8XT#DLpO+CJMFLS<)0R3*cnA8@t zsw($olI7Q3d~N4F<8{)~yEmYEo@+rt8@Cq+jT}9R3+dTQ20kdV=TDQd7oYOeyWi|H z;)DP8p;83Lv!`8YvNV-?sI|6&m1>?qrjA1^d-E$>y^U_23$D_`_t{4Xw;wuIkseD( zT<}_tv~kh1x&xqcWqcly7|Co zk6>iDf{dPQ*#=9vDY}j2$SgV>He8(0}Caf2O9)F(( zk*&&8Ad%Ygm@1wtKV8^^zq>nqUvYIfJ5g|jnTd(XG-?F?3T1mvPL{xr5lPif<;~DK zP$MskS(8Xtm5XIE4_zRp$B#%uxRiulzT;B8MQ?&zLb1-JM7zj<^sL4NrJ}QOQnhvP`Nc{E-WEb-Rr*2j?THz&*Le}E z+TvEJxq4?-;#bG!svDvY+SR5%w1TY8QxWP|_bq<(MZdc5y9;c4r!UDwCnAU;FG87U zQFr`+3+1RY;SH*LN^bHw^f*Z`S|qM_@q8MPXgPn`9vU7nN!EKaDaWc~9q zvt^R-$!a3C4%-I{n$(F(j*1A?#(T_&O1j1$O?O>Mk0H13B8(>hP>X@;4%P)Pc)?Q> z6sx}7w=1Li{-GObPgsgD*o&o+b4C$?{-d^1qP#{GlA>&Yb~LV?_o={AV5N37HKjX& zW01Lxw(IDOi5>nWkAj$;POi-l-8P`ZYi8Cc_z 
zm#h>Q>QYtWQHqzk@mVmMge2(4KqjxEBPekat@xJUWJGaofbQ?EzX)@rZKv`FrF}Bk zYh2j$Z9}u|F)JhaRJdqLQXdK}M&G<8CJhz>+Xfv`Y5=IXDQ3iK?&wL{r%)M#``@$^AS0yB^Yo(>x% zR1_N8b-GTjsNh#cBPeykZS(#E70d1s>dqaks9mmvfZI@*nl>m*Fd$2v+qyr3_woH3 zf%;4X)-$*>&uukSHqZeO5@mmJw&)ah9!mWX=z@!|7?ciIL*`Y*|KsT`1ETDs?QIwb z7`j73x*HLs8)=d5?goK}?gr_UkZ$P|grQp+B$N)N5vAe(JeWlJEs!VD%ROCVu7g{r=X1n=L1Iw}BEv{R#ZD-crrR0Q)i;oM zh9K)^u&%K?GZE|FisQqhiP`vyz?$quv>HYQnbox^q()!|jw7(bAlJdhN(o(Qb=K~J zpVDLg1Y2a^3ScSo5)hXJ-;156q6hTea#ZuvO@Hqt+%@o~=Zb?UK%D5a7D{VraqmV2 zzKV~GMXRZ_MFivLL+1y%iWcukiC}C!E3}#r{`_opAf-cNl^DjjxjnpzbX`tEc-eCV zY)QcH%vM%i^9I#12p!sLKb&WD4+GV&k7Do$8J3Ujuuz>9O#8zZW>oB=H+X2%aD#j~ zV?L5Aju5&DI-4q(E^j#B0*%)(wVVkLz7v8h6m}KH0rWMnni6>xjczP<*Tes9KZyJu z<7eNb@Sivqrrw%X$CZVtzaQqu1>Pus5;e5dcL_9`xh;r%;ij^YtbzA>Q=g%Uu0n;H zGdn5>k3|Z`LPolc(ad_tH%`+gV7mK^!D)mfQ{1HD;ZsV(#+8hBi-VSTyWc`_zVwi3 z@p~184H|*ZNga|p{8{PryfdrT0;oSdzR{RSDdo$dGt#A9#JF4vv2;_rF3-5D( zsSB-jTsoXQ1gkpk(}>wdvfF>BuNniey?Q=j-f=Avs;ocTdW!m$xn02})}#KbSGeGQ zs%n8G~NnIh3d1fom>E8VUnKG3znx?FNB$Z5zv2C*PIHKdgm!{3!DyvC4&jKo5Jt1_^wQ;G#V`A z1w0Q)^mCvFGQiz0jphr+?Ir%fB{!Yde!va~L;|ko=nI)aoa5c=P1vTE;IXs)H{?A6 zx82tVgHGjx$zYs^hYN)PXAT14>m?*(oSqsGE=y+B<{(a8$fMD#v8`XWU8;a6wC%oc zL`4#0e7!X!B!w3*e{hluC=&BNfZx}q>9xDw(aoO7? zlR`;rnbY-lNT)y+sKla>#H~Txz_BOa8A*9lF;k$HK;NT9Qfdgc#7Uz5H|1G%I^Xu{ zcX5KQ!-$RYrfsN}pUS60?o^Z(!!cv94V3F;!u%lj`$+;ZL>LwbyEVImk)KO6$d=9T zEy$@7zeC4=)SS&j?IwlE;3Ti7BS_4lA1MUWI|}L7*hUXnATWr(nKVWd3O(jSq-Xy< z)(!kTM0Sn#Eg}zBTOr?IkT_k`qpscIlZLCQ6CTe?@9S%anEfD>|A>a^de9ZHDihfc z?@hJ5dv~4*892TnJI?fK>H9^|C7B?69wkVDt2?z}w($ET@X=QZ-CII4Vc;SEv-eT+ z;x@1Z#g)kk579xmvZG#6t8F-5`Mv(jfPQ_dHst{d{%b?+01)C(@;8;V zXlYNr9ih{D*6C|912yGJPEa({jt<}pBkruC7Vo*kUR!!2v$q>T%eL4=5}c2e&Z*{4 zoQPIZ!%LaNb@245wcXta(Kxh;P>&9Q4HHL{Ii*7fX@!x^nvZdgn*9%6wp4knl87`c zzFVIXNe*V$50+y{GAtiA{JADWK35Z>vbg7-^4x<>WtsxT3NX%pv;1GH9VTWN<+wB# z{U?Mb>-u8iN5;B!@+(J2*KJitBIS$PCbGL$+TcrdQm#MPP-IYL6)8{7pey! 
z`p_7QcyKE9=9{9boM?5+e!OFDG&i2h<~>%%D%P+@t}8~JTwoHevH78Ap?GUrj6+A@w|fie}(cK+yt z>i$`T%!=Tw1kK+&Ksb!bI}80IA{dYxaCi15SeKe63OsHoq1fn!VH;UeFMyS^&i(NF zJAth?f*H5f)HnzC=YH+g$;o=D7rUmXJ3{_v$vH}C41A@?rQ&)iUrvuWGYTFOms4v488RkfoR^GVfuw7|A8FcybWaA-c|l^R`$5Pm_&V`#zYt6MBI)FOcEHJ}D+Cw0H7L`JHdeGwZihpQXE=CV36xbUnwdjH?N| z)f;e_X|^C5$_@_i{rOp+KTE{dv)mBXIdpPSng2&IjX$-;Ho%7!lfyg)qbB3mQDoJD z%}0}<47BN}-@YEybQFW}_lemh|1GEfGpqaV_;U~4d)p=q;bUw_z3j@tCzdu|h516L z&}NY{8&ihzgZtW*_?cKuK{q|iDnXlI=uFh{EoNbg+z-n#i|gD$Bw|CX06^ooiVV_+ zkivN}tn3RUU*48AuNtc>zG5fE)`*m~+MXX1h`odoxrq0(+c0(}b2UOd?AhYuR4(;V z=lMXFqp3#(Th}TE5zxwJV=4y>U^o|jynBBQeC8E2%kXXVs)`{Nhx{Dilf;MV<7hpx zxzex~mVyy902C}rvnOs2fXvct8Arp`s|6tM*A{2X`vuBq?k>Udc85Q z8z~skrQYi-GduxYXKlZCU_Cq8=ENmKA9Q=Q*?znbW(BSwrlLQ^TbpU0{95{2i;b5| zX3jy7q=tOORILt-pbe&d5vQ1ABJuP%BBgc^nMnCjpt<4=_uQ9(*xW0IZfNTq2}2+k zeQzH&;-Dg`oikh94X}w!D9)Q1IxqxzyvH%Pdb6UwWzV&z0%)eQNKAFesO>2 z`Pry~@>ugKG+*lFX6#OyPveVS{L{YT563GW#?GQLyK*2lLkDrO`EOK@?aVE>UG&bi zlW&8R6PrKRx?&1~1JVe|_2xrgqN8ms2cDgQk_scTys!&-i{6B{JTx%zSK7CSBk(DE z9RAw#C1tt@AH;{M*8FV9vqdQ0r~|ldkya>5ESbS8z@rY5M~mBA6~&G1dX6qTnaNFy zOD?=<1#xMtn=3>5+k;M))MysXYxxpoj05E*ZDyp`k1~`{f9cF#leNzh#Hx{Cv(EIM zzcCd3s^@qK+as>5D2VG5FagnH488A~ojfy3#4UT-+TjO29Q;c%GS0u&J`Ohk6A|ug za_mZuIQEKNuQ2>EbPaDj4v>%5ukHHc&{zk714)mnyq(=P7%7(LBYR!j2c$83j$jZ+ ztPB^Mn3#XwZo2RnPyuVA<99La*pN`$G7!cq*N1+Ght=B-#1P74xA_#P7}Y-GWlu*3 z%4_iL%Squak38Gokv&2=Usv&td6hS|u7U~SxG%mthA7s-H$6V+kWY-N+K}sxS@50m`u`CKuYmx*vp-$r+LtNn@3Xq+b0#g)wrEvrRBjGT z=W}{iM&+@TzY!)%*(K4Dk4fOP&<10?CN`a-p0}vac-el2k?g}Re_w>DkT94|)v21e z`q;O(`eo)i0BgT5D^B4y*XPp^N8hMQ*OXXP6LuCG$diLn(uJ1%KjGe?Cfiv*{%O+B zOVlu$)A)>JKIgw`xt&JDWBz));uttUy*or-Ba+Vh2<%zTQokj95syfh$r}w-aIJxHE4yPSX!M=>z$MSR=`t-0m_>yz6ZG)EL?(=$5ld|IKO+Db{s`p(!8kWRy6}ls|Rl5 zO|4p>q@xcBc){zr0N(3Q0)eip-%#&l_ zITGl4K#}#QBMuNCQWwDe!9DH!sucZvJq-4prHN{k0J%QXwLh+P5f6D_--cV<`il}m z=&SLdo>Iz!+KmIV)>DW&*Lc(am=1IhXu@1K}?w~G=)dEC37 zXe{W~oqyQwzx|CxLg7$~){dZDTm^5kQi zME1l*)DI_e>7gR3s_dTCSyQc+=b 
zPeu>WN3<{=xqjXp@|$2pIAT?z-&oljlqVCe#rYEVO)vXxqJZEB_)~BhR2>laN?@X7 z%)}A-zZ?%UHA6Zo3rqkOcMbyGRa_fS`pIO>w4l^J%yI*vU^>fYZc(=0)T`6LqZj_%(@VRqgD+mTF zjW8kd;;QzUE&M=#mksD7R3R$-8f94+gDRa2O=LE{)upjZyyJzYxhT?f!RnmQWobO4 z#Dm3J?W$F?68+3AaUFTyHR;j*_wrPk2)qQDO+D)vp@FS7cT+>JfQ1tTnALVoj_;f> zg8}ME?}g8Pf%vYAL8Jij45)iWXlVaCm!x&ugN8m04yKYM@ql{2clNI>+T30c;X}Vi ziorAsWBLujXS{jzZ7{`vL#5^7$NC@_296)U^?jT$r39H$q^f zIBX`230x_4+2KB20A;1t<>#%LY&H;6@_f}4A^Z=QZ^V)@R3sCuNd#NKE?R-97d|B) zTIIs282j#}FA84Jdb}?Y_4E%eTm03bCET{&VC6xYaB3X#QL_8<%e6pG@EX3Hu~{BY#yxb*_CAb_pFH>`g;CNd~+Qg`0mz*$)~IDtN&*5QPFy%;Jme z1V!MT+xeIsBflHsohtu*IVN{|xn`mSxCA2<rX=_f-floO2%}hix@l_BBjiVj{l7&dIMCJj4!@feAQ$q7Qw> zv_KLxsobDm_b;)x?fP@UEmm1AZt>W!)Z3Z6bd&gLn0O)=j}l0$V5fQ$Htp3HKSgC_ z<4CAaMZ&y^c+Z|7U-al6hrk(M!_eE}^yA~Mfi%Gi@Lk;~Hf{xq0g&Imc9v*dZ?&b99_(^cQ+uv7W-0j2ZAWZ`*UXAQMpF77HQbOy*o)JZWOgdHotNRDy}}(cWP{X z)neO|aE$jQUuNeQ&ZwW8KpiSP={tPE8!`3T9KM$trt9qZPa$}K`SWJAn+Xm&T2)dQl#@Y*{nw{=Vg#k>;18#s8&<=)~xf?dF6P{#~^ax=Sw^Dpb z&L^|yhc!$F*k~#yRLXlEfmpXp+;da_M}%CU6K(zJD@YPksxeZ43ey;DgdJGHf+&+AAJ|L&6>Ldv z7{w@5sDAC5{<=27i6kza9j}=RHa(5lmR66yAph}CC>d3f&4ZiulTOX^lln~EAqT#3 z(j3v*&?xat4VTa@ug;^S!#`AY@c*DM2VG_sdpiwxW)^Ez3m~_zJMuf$1(R+xq#r@o zk+JI1hlwagu=+;l7P6O>#Fd2fhYo7Uy~!2`#?lhUU~kfe=$?>Xtr9th>MvG2=kul2 zviG9vW9Q00hIaaYvnRKUJGFa^361KCKDSKpRCXkit;(_~PVT1k58etRGMR=za%xj3 z8i=r2t(>{YM1fVl$A?yF7oV%@4?ipAANx$cH>=uSS-p3on2k#}PmWhf3w<4-YO&8h}B0o*Wehmc#_bSy!q!+4G-}} zekcYFhDCQq912ssyebyg`O{8UlXWm*7q51c{c^BYvYkH2l9Fhxr)$tg*nv1w+ zncjrjcsr@T*?KNc$IZsGPgmwTF|YUu$7mfPVdN=?Tt4-)&Dew?B?=*Qgz~}}w5IP$ zqjjdRA<6n-)|}1U2a*sc8W$j&qld72F-yRzbm|LtmK`{`6|D^OkKPx1>%3-a>tRsJ z%pY$y5kyRTLANbE!N`Rm6_x}%WGGrTL(AL*8okY7*DHM$dDL}a5h{M|Pv!XtI#djov(OQ|>R2M?-^-L1 zt7n)e=F*)-gdc>K$LdAdr%xqjpIEtV%Ld7hF|Li?lbt#r9?Hu~^y`!~m6J9547e(=YgDC2&3IdGe)Mym z!t@p5qMcqfa^Kw%BTnU?q0vE|^;0nJRf==1-A)ER(1I1UHuoJUz3*itq;}p9NbosN z*?HKYd@UZCMMN@j+U9#C&vr&s=Ro;c0R8L5^JZ^S8X^mOUH|r#nv+oc85!Ay#PZtW z(K2dh<2t8upr}wD!TXvGpMr2s*Y{3P#%;WsKW~_{Z1n49xWkBH>~&j#AnT75n^^L8 
z<6J&4$FTb^{AX1`$X~^A_JZsqyZ@?c%m?osWnXB`CtM3AAe>~JI&Qw<r>~5Ujiu|Fq33JSL;}gEAc=@ zbdNjzD&fv9M)w`byndXBT~Fsrd@`j@L;O>2aoqy4e^OvN;Qec$Q^gS$FMll3dsVFotPs~Z#cyikdf)~Wkg5@sx{ko`L z{{)anx8qfTr}Wlbt?^^bD(#NzK+o#BFQn7BcMTF?wbc@pX>`en8JOW?pJ4rp9Y8nI z-iiS)fp^u24jCi9_#0r#`PXF>ZApploY~t$hgIsT^bh1OzdmK4zyw=Ol&}gs`hbZ) z(gitH05747Cg_rk#*8Mln`}G;Sd%~B$Z^p9I$q&T>4>FlzZlEl%m6>PV_*SHd@X)V zkBAR5VL&k@Fg-c2_SMbo2c5KXT`5+d{6`n_=fj)f#73&<2S6j&qYwchhbLx!@Jnyl z6aA(C4FdHYqe@Qsct~#NRNX($osVp1&5bGF&Z|q2a7%G)0cX}nWpkn-j4I#SHs9{; z1*i1C7RTQp3;J>7>{gs2{v(JlskQV?33KbdMd$>+%%AXZraGwVX)G!f?#eKtl{MW=W~zE z->0?}9TVYgmd%%5(lWZ!SV1+-i^w9DB43MtM?=8$u(k}D{*9|1V zika=nE4!0xj_RIaiPpFdvVp9a9qJ5~Cf7li=f(0VPXM)$5nhJ>@ZpUoOImjz`(_E` z4?xSsYxcPT6X;XAC3vd+*hw(`r#uu#0;x1AAR0U%U-gwx;ueNRhI$Yw1A&3Z4tD`~ zVMYLHVSAgV(Lt1Y9oRNQwW0DG%WAtngPyjg3_Yi=!L{f&j~4Vg&Q!_tyy7Z+`iex9 ztw!3m4(jw7icX&qMn5QoRrUgr$r1<|EOeAsR8M5xQNVL0ndzoCc;U3$+pr>qz7JY&E=b?!N&#YfGQ zLCAdxCF+(%dpY`!v!+6MKQ3=*%ZxS%NhcOWlHQp_v%h(v3^~IWf85`ei}7(}F1s=7 ze=FK<-OK9*O8W0wud~gT&pQxPBuTXKx%^)R@Z{B7cubXf??j{I7|31jfbm;<73YCu zSE%);PZc3~Rv9)b+=yrwDIkHss_7T_Evn9wXl0kRY6<`*Q@M3)04Xbu3T9V|YWu!U z>1aL3e^p`DfQIh_V#4Ww#|^8-tCr(JIshqOJgqPPop6xxImlJoekAm;$P2_dJ={#j zB;`$fqS_%X;0Qpv*f+-2Z8*+`FngQ_VZ0#k=8;^tWyvEeo{hOKqXm#_=8Q4|XrV>G z09e&*3i4?T?{~*hbAk`3k!mw$t94(Fq%bC=avJ|^;(>D5^bxFR<3 zgFb+MZL$t0Do5cCs!%NmNj`l-!hkW&g1{6H;~GZNZ9<^fwHgL?pC6d&ByM-VN`zO z<&iA)m^H1Pl@uv;R;PnSwJuA^2jk$f4B1^hp+5NB89s&mG@(#AxF0QRa`hNA*}U^u zQVmgZdHlfr14OCQD#XG*0Z}kTkC2sYJlNY;%LefP(2kiH?`=YHyZR}-tOc9op7tda z9JI;(YvNqE2W#DdO79H0%Hh)eK#ncqa2Rx;;0eJAC#G(ycQH8uTs zX`Nftf4z#9{ed+M>$U|0He;vsPN^Dh34L%dz<|#FS zMwt^g%%60OwE74X)p+-)4gUff4pPx9lw@%j#Au)Lt7;^Ua-Z@O0xQp>|56G4x|4>a z$?P8%-wa6|coyYwy)UNUJaOwyiBbB@)Ld%9My7zC$`0t!@4f|*u$g;`EQIO4rdZB| zC{|z82)|JIh$A}nq5uzh|5T!HoD>f#%AFA%j~!+Ytf05>Wa-J}J7V-%2T%b=4MsL`OGj)*{f zK0YM5q>q9&!AZBS4)Kbw(yS_lta%MkLQ|M8^m!j6WIuhReS@X`vUK6QMo#=>oTQ>1 z{yQ7jZ~ieVm`UO$h2|ydbTzXehtT6A8pT6=Y*~e}0K8IWtN)=p;Ws^NV)jAW0e%c- 
zb*`7RarAl)jy$~sqj>Zqf4<7WtC9%5>eX7Tg<8e8j*qi08F=0AL#guBY~*+P$;56Z z%>LryV3~ZgY!yS~6~r;(bN*Rfm|Xlxd2v^fi_EMcBFHm{UArcSfWttit~7R*e1qDc z*=AZYt4W~VZ=ng}Jw1S4c2*~y79umd*)KOPj-RDxc`vE^8qvRl)cmuRC<_TA<4@|A zrar&x2UVDCR~z`rQb5>?`9p+Zs1}U*RNo6!MGbOqHnoxyR)@&O*I}d#_NY`3Hz{vL zE0%j}S`tcRx!y@lAxPJ4pe@%YZq*yn=ik1uq57IRg-u=T1D@No_6FjEVZo%J6?Yqjo3kFo>+J9Gj}>lH2JZJTi4qXm7_Eg!rsAVa+cJN-%vfm9u|>1tqku;2ueLXu zAL}+A4Bg35ks^k%51IVLP9iAy@bm@$bZNsM-g7V8OnWLM^ZF*+>5IHm_x-=bcCrc9 z?UB`e1#lk5V9DW_vxyHSG-$o))mttmfNd}Gd$`_n8)I*6qim6~UzZJ*DVhuALu&?z z$$LqJ$^B6iCjGX291nXS;Dq#B8@L|7a zbW10Npes2f<^UXpQj|qANG7#|j2sdt>yw}S4b=Wun|vRh5@IkJgG!E^dc6tL-LW@h zzvFC`#w*$wO2o^OcL|y)1po>YUK>;?r&1aOoIGYQC1(H?q8C-h>^ZqOLi`nh9{Chb z080pw4miS9v(WES`HOJ#ueqf~rnssL_a)YV2tt`B#!a7$B@G^k>iQzw>DC z;ZFJU`{}IStqv+`p_oti=x$C#?#VEo0vvCJ$2^xP6CfsB;CqE}!o(Y83kHalk8%`x z1CgG61?b(^WvZqMAz9Ke4$yk2+k1ow#ATb)1u?mTl=It0KE*iQ*Sjtl7A73e*4Tgr zk_S<^b-$y2s@52v2Bo(+3txBP2O=!y4=9-EO=rN@jRUm>@HL!M>0xue5?`1JW;}q6 zRWT_YGdGtL6Yz3e7|V6eU-Tw{=jM-6(6g@#aVZPw@|`7sf_?|?t6YO7h}`zu?HlH! z>r(%-i)FQc2J=GN+?$b*jjN|tu}eM4&WWy8Ztwl4TNpU==kK^Xif3*$I`J&9s|mmR z$iygQYQczn)!y{U5gHw8hDG;;5IyPmEItxtcBcM{#%3E(8DvUhclj;pYOfSWU*^gN ziL$HM<0hn|yqxB$52;8~_tz;bh?Iv~1p?uc_(=5_TMFUui{}xG4%v`Bs_0W(Ek7%B zk#u42Z?G0(in7yj$7dVK3sV?P2$NlAoGDKN{P@xN#2~1OWk&enjNsKf1_^3NvP1;x zc`}kEj(HSn8xKJYS6m$i{g@8yi=e38YA`$W&0LmVu_{Uq?kL45p>lKxfB{=jhdygq z$D;%Y(z)sXD{8Q8J+an*OyABa_-L?3Gu+)lhNu5T7|eQys=ra$@7d?#j2 z>fKz$*G(~oiOZLi3~+wkF&DsX`wj?;Yqyg6=^<2y%Ig;{jRxgPp7rRGWVtVhtKS+! 
z7Q9NyQvE5f=XaL6WR}EIh6NiyChq$FB(T;17hgW5yhi%+PbavVGP7Cxa9+~HRRkQm zl3;azqIjK+hRKS{v$qP6mCD$EJn*1!OtFCGCi-dVWYEj|{rvr-gJt~`wBoZ?_VuDL1Y7hfe|vAQ zs_A04`P&kaL{DFr`O}du+~>HtCHRK=GzT!Xk*8iZ9bD^@T_bm;duNE7@#ixlRF7;5 znNLC;QrB>}+&o5%ra1jk-R*o8>DToG^Ai7NLt6|wpH{ZOdxkCIWb*%p1uY8GcE+#K z-E0qb(K!6-O&x<3F}wV90r3YCMHxKP0St?$>?fUnRIdI4K4EWdz5Z*z2kxIa94_9= zUwvCo8iWNWsF!X`L?RUN4f5~5=sGnxPV9PoZz5oh2N~P!)ZS!eU7glEXy&=jWB|7; ziw<4ld&B=OLtdQZCU@sta}fJ|8q*v3cD~wnFK(}BAUvT$L_kok#I=-Z^2#?c)|n;Y zlXiN0W%m2+U95@40IqB4AdZ*O?cb?I{RoIW^7F;Hb-Vo9l`^>`m}vkaDgEEk3Ttc8 zu$3;_1ej0K%<92e5D!X%k3dGUChv?>GL`#irlxQ%7qR5yTp}qu13^w&QEzFC@4T2% zw4=oG^90M*A6+2~)`!3C+)#~4WrL1QBRtAECeHf$k0u$vjm7=0wHV64oB?KwpDq%` z{@FyR?Cd6Sod5N6uo<+e5^w8`25Z2MzD^ z>02GjQyy7V$Y(>28+f9Ts8wiFHlh*c8ya|y453@iu6q^86*rrmofp3ZO4e2Om$iPA zaSoGxTC5@|Z(z;V*Ha}P6hnh}=*l6WAsQ%baAeo0C1J=YGWS-#QgbmUzO^8r8B>5O znU1K&2V!1c#7qv!%k+(W*;Jfp{DPA|Q>gfsgdsAXL!v%4t>AhZ|C>bz@R@rzI4WNX z>X5I`Y%(s^sg}n$h0iIb}U?s?CX1VL`7DZ!ZVuUqLEX?=?Zkf zwUqHc!Xy|FRjouv93q_lUQ9qjZ;sHfF;X=$!EI^0(GA`P!-T7*=%TpSDHUt>chFGJ z1^~EeWmVM^F!T3IcTg(a0I;O|<_Qs7Qc|L(p+Wa#{76p5`z+?1tHGO7&5Fi`Z6irb@2Ofr5gZ)KGqoDetR+yb%bDlYjGIe|SUxUGjw5fzl9hHWb)=i1NK?c6@ z7p5<9ppVKdmV{t zrmT#~pZ6>dfrtq*3`gQ;XL=Up?51vt=q8Lk`Q!wq8h*YYHcY6?046uSZyx$xaso;| z*j3A58KveKJMiBgI8RcwnS%2@;UoiW;+bNfSKq*-~F8UT*b@O(A<=@M) z3X;KplXQCu^)s$u44>sfKgRj0Y%Y6+8^d&5F^4lF?S)X0HLGCM~2B1UpC3ntcA+u@kW3zk#Kq zSoAfJVQW?6*GQ48F~r1Bh|PUCvFKNCKPn_)#wWUQb91A?w!V0g&X{h3BW_%FGDV3Y z>tIO=J*7q!({EKQZh-LDDH+1?nh_s1ZE7hE^6{Dne*KZ#iJ3R#L?cM0K6qPI?a~+gLArKHba7F!?_D~^?yZG}&$E>>k z9vhCP=sCo&DqX+?7r|cre1ghPh&mjEM4%`U(qYy@P%OSts)U-9HvB89Rh+Yf&F29=vS9cw)s1!_)|v%J7D&7^Po$uABOjX^_W2Tt zQ4DBKD8=Xt9RJ%6O*Q}pa93!tgF3;mg@HeB!nzFzFuPZsf3{TiRqOKG;F8h<^c`DB z91T$?p~(m`(_@%SNiK*I_xu0p1Sax|N48ZR{zI_m4@MyB&k`9A%bP35yuynd)8Io- zO>?k#ruYWrJA}r7f-u{jc4?7>ked4O++gX|Dp@#^a%gYFBMQ+=R5(3GimDh3n^uCj zD>Da&7QRkXB)t$6{;xk8|91xjEp!o`R8=j%yinZRbj`YKO}FX~_?>+{$vgE*&(^6C zQG(I3#P+rh8#UGln%H~wMfKxBQlKcUmUtI29t}dqfG{L;+q5Avbui#9p#w9Aod5Wy zFrt0+Q%MiW_Q91 
zL+Z~Znf4!0PCg|s-=M4ATZWD={b~NCY<+9MaQfuGKo4KE{gG?O$56}keI;~27qXyJ zPa>r6yW8BO*XI69jW9zdR&iHYSLz?E$zdsZsjne24oFK6Axy|~^n<#lD9!K<4@i@Z6tit#8W={Dg?4U4Ozhl;i zqjK%I!cl@|C*Ce<6QiM_H3VEM>$U@*UGCuU%|{%$;(QaS;WD4r*yVx%cL}oga?RLN zQFi4@_Z@mI_C|`e9R;bYL@va(si!j-NNNY0yZA&LeZ)4{e0k)+XpJ_<_u2pXru?<1 z#V;_0Zmi(L{0EeqUCKxAI_U-Y3oajsp}n&1lw^VOM6yY#yJ?Sj?OWlrpn zZUb@2z6IOrqPJo3cffpBfy!LXL?%@eyd~J0^!DL_r`-j}=kL|y$FT+q;tuB+Mt2l{ ztfv?Y{db%S=C~_qy_l>h1g&rNDQ&-y@U@Fw2OlWdD)8Z@4X1olhV5Tn9#$sThKVHK z4TRvEwE`O#Bjme5vO=tih*=byj$uo7wJcw~hs|X7U;e$}2@1U`%FaRVZUR4nYsXg; z>@839I=~bg@FV~CPQc~B(oK1o;&jR}V=PzuT3;f1UhdHE2RH+4Z zM*;tYH5tP?TLo?q`bMEltXjv&PL=&*lN6sPIqhD2UevV(bc@zAcB$WNb3-Bl=JH-X z0iONv9rRWe)zwO0vQ*VrV&Bgtd?FxXt^KOq(Zz`Qy$Br`i+psO?b42B3)&8`nK_IO zwVYW~Lm9!fH%`@6ox8UPTIpqZKBJGA@&!nlUbD-|kh-Z7Rj2x>l0QF1tUX^O{!T_I zdDymHTK!|K_9)}1K|37*&WMn`W`1Y$b8}F)W~_Q_CJmc=vCU8`%X**jh87+3CBbjq|Df*LPThC! zKO8h)`MlFCX-H@&LQg4jSzq8UQKw|EnA3(V-LyLtT-r+K zW{-}HTF};c+yfAYdA)BAs3^obbSW^qTlI|?_+Q{Tj#63kR@D;Cs~7)9QJZGJJDJEj zgx#pAX4XvjY0c%Y)y;v9yVtlQL{+=IsZCjgYO*hbqTx-}M?PF*RnTbcM0$su#mO6k zKB%~>OO$Ig_m{btLgVXFSkK*$rT1CRy= zA2CPrk)A9vm6eqkc3h8IY#LMnlLt!4%0J*=METxDkXI(g%Co&33;Z2hy5t(nUEZz9et6FPkimY3_{9ZFa(h_ubcgam%U|6 zdAoO){g>}}jmjD^4!v|lz74G-EfpzSvheYA(RNHBg(2M7?9VoEe@3kRK|=MC2}&ob z9Y)!2*P<$8Mm(siF(J$ij;_Gmq|(v1v6OdI+7EbTpkFUgsS-S|!ljNjw#8XhS@W=C zICIZGi>HOIi2ko)n){`eZP3>N8j z`XQ)I@a5&JPfX!R`DERqj9(w_jX@VJ-_i&6JQ;^2o$=5HS%jEBtwI=WLC zsgLr6#^g*)#+0vVJcPuO@J}3)mpxXtSL;(Yvb(fJRhKicREJ#a?kDawNLNph_&A^T zK+Vn5LS-ukFWUORVrtAMd%ATfLz~Z6WrdZ)`Jbr1^ZL)&$=9{`m=;z3)r(@Fg}N%M z^MNYh38YnYLS4F$&4ah*9Q@^CMR zJNC=2*uQpE3?2gm19pM<-PKLBL4M#M(5NId7FTf>3hpK{xDbQOUA478kR%{I%V7Dc zYWwviV;`wIP0(v3&WF8REme0jyGk~dFOdW4T1q=Zo=M_7;2SL;fmiFAiqPO>8cuPn z^~HKUq!*IG*VZ!4cD%|3m&S1l+1c4?0K&*x!$5>RxCT@td9XS)Rms4}Sm=pa6k_&< z!E30Z8d^KKv^j_B^6RySRVS?!M920GQ4MHz>$q;33BXP=wNZ9)7h1mwBm^f|d;F)!fL1$y5m5ozvn1xs`m*3Th z0IBxsSUE)dukqYyps{bOd)EmTbI$u#RS9{7@w!GT3pihSmK(_Mm#%Ag-Ku=NMGEmW*`e0ehusU@|Y6 
z6Z>tT0sgLFzC9x+g*_{pi6DJKB#>dx{$P8yJx~LGI^s)huO!t(ogq=}sf^!-Jp@!? z$bY@ku_S?2ca%#;4^aklJ80Ltr^F!+CW5T2`viB`#f9+OFSN{^|DbK{#s3T~T7oj0 zXlhJBnvP`w$(HadlN?6Sx{E_$#G(;&-Dlc~A}^X7fgWK2Jr|!B&J*RT5H1t>ZYv&R z#P0<~pntBKPA^_KpZLMoP1NgY#xcw9;T|)A6WwSH9y%|N?Ps5bB=j9pLe^0Zu{mpvo#i`dY|7QFrJ)| zL3}`v5Dqg4y#E_A@qUZ2U+Ax~kg=iLQbJ8y9;YbziJRj$O$`k>UEM6cbJfIkJn>O; z7WST_z$3l*YBNyv)nd>+RfZ~kTT+Sp=}l;EI@a!0fP<+d&vKdLXxYOB5)2OnMay(& z-o6Tb6^Z)UAdRQ&8*(Cc7gWLN{Mm{_$Qq~z+bEM_RQ!jA430H6&N$)D*$v)*tn~CY zFDjc85w$D5c6==Z85d7@i9VlN4YC=wFVIe@o}eG`i2U?iwtQ zf-I2~&^EQ6u7iC4m525@y|0(Bfa}F_lWkEKlc9HhDRz$;E>S}K3z5o-FrTRKNfsuK zI-+c~^~w)k6dcQ$Ger;1C}~8Gm!|o8L7Y>+f7_MBnS|J8ZJo82mdYw9D)tHI&%pyO0E}q}zAS)S#I2-s2|&2uP62M4#CWcdwq@d$*Vzu2tAR~h zB8?XH@D`%h(CDbb5&tvK&Ofu|I9h%>R@oedCGS2EwXQefj@T;MCJ?DIOAmtDWD%E0 z2y8oTG6=@Sav$yhK{@QRwcL7;#z=d6J1S#_Y5>?*u-E;h-lxnuSbyLOxVx&>b6wr+ z+k34D>=z+pW~%5~E4Wcd9tu8Ur$b&nW~7^IcSci0Ol*zTlmC3xnYyW=q-12jURcQ) zldfSvO$6TUWBvl0=HEeh4FCY~cL&r|qmp5r8C|k232upBa}_BK{nY-9tA-}g=;vr( z2BHIxP~jo^Mn*LaWXr~u|DLB*qbwaWvsF?NVT~Bx-7T55LuW>lk8t~L)ak_)6)XF_ zzA{4dn_d^Vyw*%$#JN%hCa^_Rs^?GwV^*ou1Blm^cw0md{Va4WSf8W^LzkJIHCKPq z<6NMwMpxb?r#izUNxT%ADZ-{6()mBIO*yy@WSF-NED_CDh-*zV{rWtT>AfK?MN*qM zZTUC#@X?U7ZvmI)Cs~{?+1Yn5>v7rLMggp1^(%HV;!H}aX=^ir%bT4|&TF9FPqn|dUg3=O`s=l(ppeLh$ap(F z@+CR#)8ch(kfjGKrkUR6Xkx*}@3aSnUQBF__<0++-C>xf6#()P!3|7SRVTW&f1q`8 z@`|y440MZPp^2X@L4etA&%HXrEF8w{stcg(a&TryU5m*MIaxe%fk}$Aj_%#=aL3j?06_B9LVD^dO?JyBWWZ`BPx(Q7l!?W`L#3V4W;4 z>op6QfcDFrZ=xnIllV`53smjb5U{XSVbVE+i_!qPtEiE!R>At)>IDn~(Z2C3xJQJ# z8mPpc2^R&|rDcx}k1dULwW&uotA?hGMP9Fxyp7076X)nIN}X1Eapa}c z;#4AyJ901G-S0tNcel^;*=L_;d-gp4_uv2b_Pv(>E}4ZI;X-Z@YI)N`EXy+Ha97B6 zodalfP0h$*V|juKo$k$fs;zn?unGHGJI~DKSp^PVUC`6hW1WpN*h41ODz?;DAXyZ_ zA$wHiuPlZiHuFANaJh0%2rv?S_Cy=_XLY`rQI5Ky?Z}S3f;JP!Fl~NmqP$K3R@ScV z&nV-}pH>`maI}X7zjNqz^KxCp1Nknb9n3K|%hBK_0S?>RqpVbGwaUe+sw^;|)oOyO!kIz~{ zCduT`*ufv|?-A`;i@J}i$vv+50~lbr1qF){Hu-@5n^jvA2L~rQ?l^ro+3`ravbSNk z|J1ENfG5HMl(uT!(bPABgK9+ivnYf2^8y-m5LLtz0!HxbrWn$>55>Y&S&(wH@|HU1 
z?bD!zqYDf957rqzbB`?eknmEiU?~H-ox5+T0v6&uGugot47x+790sv30eIdWr?z$@ zlkOG~QK^t6J6%rV*>-?dqioyt1j*_bulBD&C*MeFl(#l`kPtqcGX`!Jvs?gqaFYUu zJC(*=t(Yt!CJaX|M08hyfrb6DEG4&<>+9}w)q*5LT($5I;9glnS_OP?KM$vAs`|C- zGlOZ19t`d>#|{ObjAE`cbTlzzb105hC6+a<1A7VamWIUM-xUODqH8U0hMn#as%_XQ zAR&V1^oN$IjEL%iw>umQopz+c5~st)!$xh6Sp7oCH@|T+9@;O>^uV zo>^cLd;%z+Cm$fjo-H7-0V_#L#55H+Q5 z1xZvy)sRd1oX^!z%H$Vc<1NZrqvOTV9{5!n@ltPlkFcT0_xRqaAwW}^f^U2-^hmse zbR2A=2-dMHe+Ggi%fK6JY<$!40LqOLK1s_b~`-x`3E6-QH2>xXNMEFwLS{*C0krIKZwH`Lka<<)F2EidC zPL-D~4JlkxquAF_PnRUd7XJF{udWgpY*i?4+tX^@VevW{V24AW&&8DWvYG}UR9XT7 zNQ<7R^c_610YfWVfi^bvizo7_Xi_azT2}HRbnE`f$pf{DobfGn;T>6^dF%9EJK@|Hyw0mt1Kq6&-*N;qks}lclxFkEbXY(4rql+HnV>a91^&K#I>3cUNCCe?)N z{^;D4ll>7c)3Ot@Um|a{bZcDqX33J;#zszJ(pIATBbc_xnuOJ6v9huCQD%5as& z*ClD8kbh_2j4XKB7i~VFPDL<5i{nOp`f7udTRO!M9G?DWsDmc8#t4PN^oR-nij9GD zb^0?N0kj^P9=kKfeiY*9hHZ5nA4~Yy}JzipK zzonU)cASwdnZN3QE5$3~AQUPL5?LW}JT~#;1r_3n7k&D?)H9nL`*L&Yvw6+fqI;Vn z_U7AMST|d5kd7P=J_XKuVD^`-fbIU|ixWDM6+Hn|Ffc68c|}0lLXiZOsZjY%pU+&& z>{jf>x!pY5>21?0u}zwYiHX@A_RXcc)AEJzV+#6$YEkE4-NMqwkG&w;>ru4WY}sqE z2AnBZ1&wfoy<~d656|~xCKl$(f4a`xr8EFAbFw2MEU)b7LlWEmb66Y)jb%zK?HqHy z7;_hZ7$D-Gb35DL43o|52x-7VJV|NjP0cH6qVKshSLEqlwqD#sB$xGBow&|36J42iZ@GSnbYZYpS#D tKK9Q~&#)R?(LjMyr2Og3+=mA2pTv}D^l@#G5gr9E7nY|}&6=R3zX8*sv`PQ~ literal 0 HcmV?d00001 diff --git a/vignettes/R/RDD/trees2.png b/vignettes/R/RDD/trees2.png new file mode 100644 index 0000000000000000000000000000000000000000..90b7bd5f12e4333dce7cc12b5e63e638d64a7804 GIT binary patch literal 24832 zcmeIaWmHyO6fUX=f}j`(s7QlIi_-ZjN=k?V(hAZgT>`!~NF$)MN=ZsfqY?sAFWo5J z(hYa+7ZmO}=iYy3j5E&ozA^m7&5pI#nrqHyKJ%H|Ls4Gx7~wg>0|yQqla{)!bl|`t z#sdd%o)8>{BlTpAgzmaPsD2@pV#<^x zzZe5dV38y5gwV;|ybsyysb8iqn}7pZi1mghqn)qRI2v2LiJ6 zE7L{4T50}lT`KQO#IIjySN?cTB~oEV+mzFN#6>q+F*%`GEP)|NjhCj(V@NB^N|c6J zBkT0+5yO;P^6+`{Vzb)h!OLaKW#Wmcr(aE!H9iVSwX^DIzg1%2-}TaSc>MArSEAKV zU1Bwvlb0E)jTt^m@vZd*8aZ8VKm47F&Ftz^;;?RJr87rQdxYG(cH`(R-6K|s3?+** z6!zSkWKz8is{TGIf``wOpAnrjB3Uo}A^gy_?6mA^UUL5$YwX!iwNHr~bQTM9d1x*j 
z;ieXu>6tAY7HC+$uDwjsAEn>;&9mJWrd&n;uC#%i+yNH&nc%>|m&ONh;irS}=N$Zj z)d_fU03ZH71%IwbYA;#!Ke}Be#f}D8uwz#x3{C!)`T3_G7#>mokErCE6Zq;gh z=dSHtxtoG|mgXGyA6V+@b2yqmL@qfX>?jC7n(N!%r*Sklv#=3#6rsbOAqYPshdJqJ zu&3CXiqPGaQ=}2MwAQEL<+#LgiB6P|hK5Gi`hkI<(sha5%i&)lbVjze4+S|n9UL4u z9IkL!S{rh52?z*qUb@VA`7%44!EWPZVSC?^-NJ@`=O%yeb6wv?&)WE*t+AyA4RYW6 zx|VjfB6M`fgZ}-uGfsU+anorlfiM`AJ^L-fV~EaR#pbdHY*59w+Qi z__-cQLNTBZ+&}VcurJ;qe>PhuSV`f%82or{bEXH;RtVDl!l%BqFO`7bMFHO zac~Jv(4Zc}564c8c11fY%OXGT9z@?nU7WqP^K0y(==V6+q{Ua2<@Zi{toWM;d;QKe zC~>@J0Wao~!uRfS@O_!--uVa6F9-=3j}55`{CmtnoNFF!`_9KXMtVZ#Ty>iD-*Ld1 z&rYT7J0JHP3(XtuH#$TAMvqHCz^Hv_-#@wUpe`C;EU7-S`+D%J6EcN||Gq8!(%_JY*?rrG8 z0yAFC`LIJjq!xUyGTqe^cyXvW6rlR=>!tFbaXUCJ$c(YjLNMr&q z&yJ9s^Nh8|sb%wfQdIKZ+WQ_MW6m(iABW~gdaZ$Es*BqZ%2N&s+N`yY%m^91do_z& zMo%h={aanMClLW7sr$iE(Y-?waLAkL;Nsp=C470Ts5DFdczW-UQ5LM{f4BN?t`N8J zpD`d_3jKeFF=&z9{e16g;aoMVmRm8?sU81YOA8^OlHZsST}{+&-({ZOW8YQ&=ht_< z!0M872M{lgvLqSp>k^C+!zEadN3-X&(-m(mrd9rYcSW!L^ZlaCK<69SxWod%&d0Ts=xs>KGiE)@fT$l1yjMUo<*(drle}mGYB5w@b%c<_M#LDKD^_X8ftK#1!(g| zFHYB$d64KWO?J!%Mr|+*|9lr%bbq70Qk0Bb+wp!;*oR_5f}8IyGs^S-dPcZ#ha4M_ zP}y*?iMPZ1qRpA@t(h+?zh7r&GPtfSRMovXdXD>IPKQC*wTWA2&% zlXefZeQp(7pUmWfVNH79H>J188w$P)|I>}IYc=i}5<-8&#l&jhQ#sz7%&6f|dpCCZ ztIJFg9~Fd$Z{vz5cBknK#89qjIh~0>1<|vv>r39=yqyUtoW|c=CKv{}X*(pMrO>Bk zFdhD_Qyxr_Kd9=W#7gW!W_Ch3DiFYeoZv%mi>1-Vk9wt#51NP6{d~(sUnewE*lu0J zuGgBZHv5C#mpk8MeC7s@@!9@`&`y)2lul>6dXeQaC0TaTv1}vyod6!HQF?uOn5yJO zw%bsUs?UIr5Qn_TVzX38V~j$I`^EsZWd5^>wvsJ7FKV&k`{uW;W=N#oKN$Ss-=S(c z!%?y^XZFEGP|xt+-2h=s&zc%dlq?s`=<{XBl6aT=dCvL3T$;oe|2S2+oRo#pz@6SC zTooPH#mS$Vrtw<~@nZTmp&WUG0j*layP->t4Gej_o z7vKHET__JQ$y3K(#O(o~ zgbxs8%Wt(AVu6^Sc>j$1woAA3L<*N>Z{c}Kp=WUMYw{B^WR?SbJL^@teWcs2P0OnN z{>yS?y>&LnRr3&>fFLPF`RCYyp?jGh{^Y7G3n^ZR{W4!Cqz9Ia>_%lZ^m3oOn6~R; zOZ8I8QOevkm9r98DX=z<^1y?gMH0xQ;B%#3_{vhdp6{vLEC+7$t<y#vA126qbel zJz>V*kMViI?tMl|V0xu^dtPjtgoH`n%hu_4-_}xY=baDD@_6CQV{d+t?A$dQZ)+~3 zYPw8i>zf*+nbenT zO_Wr?R-u4hrO=wz_))U5hO^|nhGmgm+PGnd;1#>^<}*U}W*wo*pOy$1gU(q*Fk&rN 
zi~O$1r&zioX(~ZmqsgM>UaQIU%8C)O?bTHB{&zj69H;X}elIjB_@=o|nVMI|3JA+` z7TG43Y^|BE3tv>ZH{G4rk!XEQIz6?AUGCyo>e~^l?V$;e96fW1hRU%Yy)wuocJ>fH zu~)HckzGQ|pZKBRCVh>mdQZf46(lDpM2Ti6=FAQCf~V~Wk67r82c9B6y{mCNV4+H;iu9)7O9kJ-b3)4S<5IUxeYZ{Jp$zgnURg_ zxX>t>DLq4B_W7!}X{yC6;!dNxr2HCF?yoo^C7OsSW0(e-0SaGjCiCtqfEV(s1ap>#CNYmlj>`s5h`l=(KlM zv^f}J*%E#iZ)cM_;VE70(0$DwntMnNGuM%6Sb5ek{3H<>+Yj~d;_a>F5?@ZY+1GNI zr^99jp$Zw`<2jmFY!%*SG1%mL46)~SXEe-gC0$^Nn@E5bD6MZu$_s>3&JcEVZ;UTWTOsFYSDVs>lBcw$n;aF}Ie?G3Y$2C7GYzScGm##x%?FV`%L zH6hX6+;yxiO{W_cbGDi_?LiGr?&i0rp+sLEB@LIc>;eeJht49{A6r9zZ)EAz)k#P&vNiudxe22?32Vqg{H(oUt&a2pq4obkfUx7ZO7j zO{_M_uoJfsn~K(k)VnCAMSS0dFX7NjHv3&bgxCy>>wSziW}J54h2n5XoJ?q6BAgHc z6U%A5xaa9<8s0<5r+po<LU9t zw7p34#`cFU)4sP}Jayu~=|bjkoKaEgqSN-~@Aduxmut$3*QY5q*A{EqE=%r2Y7Rm+fIYMI+Z*+>v7D~VDr)*clL_lHC1P$%nNj6y z6KT{@o0EnGcLU;)3sCg9&KtA0*6^cw{9=fcvoJDNqwilD7|OaWCh72eP#8xH8tr)m zHYt;j@Z66aR6u^o*TWQL4#o2kXD&aG(sUXMA|+|mg16%iLHUYT-Bu7GZHiaeMx{x5 zojEy1ePm}ZCcG>KR7*Z~IrT7^umu6Q2LMGVAMpo?g<& z%RIM-x59@qOsg_%>;9MZ=>pkiWjSPzpnKUKmYtL=ljG|-O})KRsn`9*D!rDstL!b8 z9-UT2%dNX(wPb4mi_r!oqgEFM5H~sc(bp%;vRL4PLgabFa4QLT5Bd(ZfQi(CR7GD8 z5+*YB$ZrI924j|D#@G z7&0u}?n!zVW=s$4slHfUR&;w~KGWUzroAQUYy?=_5XtClctCTkB(*E|aBWzw;!IAj z^MvC}(K1=*2tL+=6j{-EHX{`z3c|DK-z9TFX%DvLUHi%Hv5v_Y*ZEQ00{}r*W3eF; z`S)7@+WWi4X8-DYG+*nu^6RB+)2(AW)5z*4iYse#uIcO=cO@6QbP3Ex1t9iJ)EwBz za&O(lSulQQG|IRBO-$7@W1L^qTYp}j;Wko?@eA0xDivZL>!-Ve9kR(B7h6;ZVglto zk%(9CzBO;A{6ovWgAubSODww5FJ&lehFG(rHkvAmS-NcoS$wqvtL2<-C&ySL7^NL- za~U2w;cM5xNeyARDw_<-(6t})^;B2ZB}-L@uSjCkIude z;C2`!(pkBdb5t*>0TKsBzbxGqEYoRsvfZ*|Gh^s6#QwURHQWGR!sEyQOt1qeqTn^V z+ZC@AJDpO2-2`c5cMT)#b^-+|IuDt|n7sU??0qisjU4JCbJr`Qy~qH&0%$Lahdn!p z5Bq|=;JMA{KkW0ZEEgbEH7m6X+40B7mg|7%k>UMnY0sG_yao53ey|PMMY~7!NlwUA z^U`(hMUQA*2p;8!H68Zi!heH;S^YnBrbcyg4jb=S%f+NpNN~v@uVFKt&q|WOQKp}uCZCeb_G}JU5NLcs z#yQ-5hfJ6OEhwpW{4-2k0$X5TY-I+I>^sz~1jr*cHDk{-PmBVc|1WLZ8C*gvn7OQ$ z{vHEN)9?vU46{Ge-d>t0t9UpR ze?4*!Nj*lf!|eR8C+vMSj?M5-4~Jf-_-fp>zx9!XJQ$*lE-Cj%6d~Qq^VU*FDFwOR 
z=EM59^~kgSrrr(Q@SvunK`&Zw->YH_@rnYLpfT6756J{Yl2pidCpp?0XOp#U;E9m1 ze)XF#%e^jokReqUBTUf~Q$@{>xLA7T{&P~@sq8k3J0#6r;%LZD_&C1wKN;;SaR-ng zX5Z^Ha@VSy)a7pC_npZ_e}H_rig^u5*Y3ZhBtsHxdLMqe3U3N>BzYBvkRl*!d$$7@ z(;jD)%@bcr?<*V-G1PhQ>9OCXhmLze80JGrl;KG)m7Szgo1_1xblt(NugBd^KtS=qbNU0 z`ue=2a=utF@-w}Xs!KScL1||`X<`zOVi zyuju4XGKS+nqN#4Avm{3?8nI+H-zeeot__<^w|t2DJ4gp427_+lb2I%muI-+8EUUe zy)WD4N&yCFfIG=4ma#mAw#`#m&(t21c#9()cr?;rH!+jrytkMVM}3WQ&*3z5!=AVC zjcVO9`!Y6I!GM%%p4^VYHhf4ID2EV6T~K^D)V zcy;*8uNM@~#xu6BOTXb!HL_zLE+QX{gRCn?IIfrY(-q~CXev(p2^PrC3#TUEaF zT|o7*JmTGV(||DpYO+gMP1j*zyQbr%UjM&#%P}$MU{pJ5Gzjp zeNFCb_kb-%xa%*#?hNFXY?{-DM$6+IJNYm!hIW$WFU+!Kdx+472$yyjoaeWUQc8-B zu5|!ay?k-b*LUIpcX}zXn&j(KxjKb_BrPJ_%LWq0dH_`Aj|}|{X6R7Dn4nyPy703j z;WqLJg&b$c9F9`M{mIN+BV~lYyJXQf`pZZZykJiQ?Md%=BEbtthM==1l)EDey@8@J z4)({?eSk51U!bKuzkqGp6*%5iHkA-BxRHY_AG>85OQtnbb(j9>xpDJ|J@2P zvj64^aX|mOjRC!t=VQ`v72v6vVr{Jkd*(1@fq`jWn|h}A+ofUBKkD_YBmA%&Y}(QF zPlC;7OO_~hJpk5Vn^j7OG_n_!1^0Ql2oQHf99ZSqcPJaMrO8B+?Y>I|PXgRLIA^-= zkO?cm%~sX1eY?Y!4&WxvVB5Y!%_;ylQ_?W|;pTOKo47yH_8m${0W7DX6T9cJX&O8L zZXWT|*>~t~FxV#}lMTESO5>-}XgK=wCCJ$wPqRQd=_xBt{?vd5uj z>AzX^a&ASR6xX6?vDI;PC`2#Hb;%>UO_>4vEjY= zz(?v$qb18`2n=mMdtm4hgbSc^vRh+QplNuGH(VD%0VR%~&IqM%7++fts873FPYdB4 z&>2T!WV0}>X*wm7>o6ZwFDdQs+P5-v{Sw1<&I1o!^ysl&nhm-Y6%Z-lG&gvL&q8wq z-)W~7d{L3+xqmB`>W~bb;J2S_$1znrjkX5W$`BQGO47&D$&SoScU**h$x%;(vP@1T zqMm`rIuqQC5adzOvTH#?Xy!O3R%^N2MnNP|1t0wa7=dFSRjtnAA_4ubO)U?-*0;s5 zzKshA5?=WsDH_0olv2zJl9P^_w;V<$X0!+=KEhs|H(g}$4;X>3U8`DrO6FK!Ja=xH zL^L1Kf>5#kL@i}A9}e|}KmAXSSA0J%7XI8fYr|6A)pTuPtmDCQ*7=V^ zMRZbXKFf=vXHWSG&q*N+a<1$7pJO}Iyi69f zFMg`vpJ3cGcnsmI-dM_W$99XZ$o0oyKb;#$4nb}Pa|q|kz&?LtZ^Z5OZ(E>vV569L zWoqw{;}?+)-1)xo(%;Q=5T^w+VToLI&U=cf?T*z*KrIVU}i-gB81*lOit(xAVjB;As~=uMJTP|} z;G06dctmDvq>d)ndBincvc2_I$ibrZC8DRIavJpi0xmo(N8ub6%d7%o*|7J)0*v!G zs+v|e01#XdU9D260I?1^WQZC_C>Nc7S6>ENEd+fC$k-8JAC?IZ`}r|`3VGNyG@>Lj zs0}5xES#0N87VBl+kWpU|MZsuUEm|Ez8ztD&=>YuBlIS+DMe0<_C3|3l)-M$eD+Ju z3!@0(wUiA!isaf_uKRkfC^cWyveRC2nIx31DqW?gP(#JPugiI 
zL(vO2-=08JC9Ne{ovl!-3a({*cUfO%W71IUwSI^eZw^S?5H`>(x5wU}V+Jel*;SC` z_CkP*p2kT*Gbjr`>@Q_|6muXT2WUtp1go*dXg-=Pw?f1=z`e=Y*V}F&Rpu5z%86H6 zongk%m?LbUMuPg4Z5)F+NDE#;X8k5L;v6>f%_ap^2T&Ubnnha>5HR$V&fNjSSfGOt zOnxjb^=|-%C6jvrj@-G~k3hW=(AtqsWKUxQFnXNe?QNxq^w!%ra?F0y=Ci+F zb5?-vg%pSy&)I=WK7FzAmQfHNly9ugBQmeW#0=}jn5xw(Cn!CoYVaFj`;u6|(Zf|}gBlnPrYOI0G}JP=dONLvN*FG2m^>orGy z!YNnt>rC5y+w1P>7#Y@bAf_~O$Elr1-?zgY>w>neM&&}1MmY;G2q;RhV?o^{PgD>t zCZZ4|Ck<3~Kmu;rAOtQE{a7yD4dT1ZFR?)^>O;}vYr}tuOjlLOS zSCIwC>x^JZh!2x>TQ2DH_NSMu05BYsq>T@CqUcEoA{~-a>jd1NP$w;YX7VRe+kvDC zyF;Us*|hP_R{X0?od6S3-1;(8ygo#O1LJ@-G!WjlRh6VzVDJ>p%UlF%Iwr&V} z5loP``{DO|Zbq%t0E-xha#Gc@{;y955S|Fq0kI0Nv$sRmD3q&ORCOrDfOVwTdcp&U zZe^{lgsM%`Ms8!u(mly3Fg1TXksq=RdRHBgEB$^&@AIRgAG9K=PCd1Et`g!?ueVob zWHLH+ZUWsrB*0dXHwZsOOx(rJ$5}vZ24@(}859lC62$`YT5^_a7@l=<&4*VZxYHus z^`v~iok4;rBJINddSbp(uPuAhB>%~Ie?pv!PnfXYa4!Bu8VEI;mxOgZz zj9cW4$U`|@bn%lyHeL?5gj2nnZT5`KNS(Ls>Tl2;o0*BfY{coZ->6*l+eO-!3+ZR&kId|(Od7~dho;V zEeN>cqyEHQIsE;_v6@~-_B$xfP1>Rk5zv-)IxoY`4WoRvVTCQaaz~#60n36>U7rH= z+H0gN3&~}(+wx0M`GYZ=@te%ekkhgIUsLe!f`WYzyJ~mw&#Z5qpaAS}Q(bZd$IGsD zMh?xX)d~jA2P?+I9nn$%p7LYxdxj!j-i`->{44!WyMhnNzu$aLuD$X&j1mb0D&SMp zB;^Z^G)p0`vtVqsW>Qic%EMYv$e{Hk$WZqE1Mtz+Hbza9fhsjJhR=~mlanp-{7DwB znV^e*uz2cF?47aNB`8AosW{}{G3qZx6HS?}>(}8Mw|wIE5M_Vd;3m5%zfRs$yp)|5 zWa0mmJ8BRJV#geYho&w~G0Q&FssY0XT?Z)oR#5IuzmL5>ylC6!AGN(P(sA}}t96%q zl(3Wa`G=(k&Cv`++L%7c{BT4@i|z%UVIkPR`8W<90ry(1)jG!=<~IuyZJe!Yh7mcL zlE%fQ3T|_O?J#dw0&FiJ6VWmc!!)#u&19&gc7Mbh68j@;Ar_Aalc@XJVoYum(R$r* z2$>r>O`Ho7*XNx}Atf7(M14e4W~=vulvO;q`Ai$x@d6N~NMq0IO@WA$d7DdJA2(U_ znPXzY^m2+t$y`3`Ad$}lm+eUR^{26)a81}7g7_-4EyPV%ncp;Sk#hb93SQ8j3`o-w8mKR-lXeQwr15Cuswo!P83m; z2g+LAx^RIGH81r=rU)+C<#Fi{k8BHL{d|$7c6uaxon%v=aO;D>+wc>3X72e>VST_u*>j9~S|&>3J!-~ouViH?Ifv(DF<6J% z_z$->TgwJcXpE#XtY;-QeX$xaQPaO291$EN?@n$k+vh8~Hi}3=3LAzrZ#b}N$>$lN zZrZ4C!SjO5tcZls45ksmYAyU3Uxj&*mJ@fk7!ez zgNKV^@~E=AF_&v!_eKq7Ms1prX(PU-Vr6!~RILapb}I6Ly<0T1&@~9-+$a=~4+)Q< z_+w`%nz;*&9=flsQrxx-*CPiG3;knI9o9bs%0J_R(qhbv*i+} 
zRqhK!7JegS9pq2sdKr_W?fC1te{ijQ4P$(4l4()w8kxv#`--!MWdP0)8F_Z9_9|?* zgfPga_)JJoCIq8GqXKXwRC8?wZstF`$nF|?!5%1|4lR-9d1o5G;1eJeDi{Fs)3;Z- zpv92|@Mj^qY??B3xRJXlSi6mS^@pT2pMVx&M}&DJ=;@MC7?xi`2XTsS0IkMO|A`tm zRFkHt1F8q}bUifFgMkWl!Qaj2ICyT%Hwg#dP<>*b&j#69%cm>a>BScrn|rHAdfdlq*J zDqq7{U3nZBEYnU=BYxzxLGbi*T>=5fO|1!pLFkjdy|rHHzS-tp4u#0zpwCc7s3b30 zDK{hi43e4Qi!HRs&B1;z0A>WWVzg?S=4U_y%G9TOJx6g?~7gluo3TP>H`dS|ykd1YQMjX5?ZkUyuI=jl9=LlzZ{= zp-}{;54jQiymO9(qK_;c#FJJ=gPW*A-5~N9b zf=nhE8B%TB`14ZC3u=`@yr3OIbUz4?0=|g^1kfAb6qZPk0WLTnOtM^Dqiwy4Jn%LR z`@!&H1phdKoLUbeWjb_#gk+|xW7{@(5C*t9=^>nYV)W)c&!=H+DqWqbNIoc;h}rk$dFr#phuEp7$FFkX!Ct*a8np6k2lLBWj~h z18ie{ah3k>DYyi8RKYr@+b8xq8qrDs@ng>%_TGIwoU5n>d;7mZ{WnuM|35aBPk+gM zYhzltXGczrY#$k@lD>KLnA`F`8Z>nrFNi76K}u?H-l5xS7TR}MkQDrR)F9Jen?=W+ z8tuE2($Zc{gaigItKVhVGClww|GH33_svBs*eaDA1(TNgB-VS(07cz`= z$kr<%cK>dQ+OkEezlhGe9dTv?->~%r?Bn#wN1NuV=AUHnDXkN*vHWQeKVA-9TYA-R zn5{ssQbxpCcmhz0p{8x)4W#ICBTCyds!8(r4qSm%8Lz|EW9+U#o2l6R;fP(Hb zcbMg&&}0|$_FDo92B6Rr8UOf1QTo1@iD!Xw+bN`9s)nFbav3fhlr zJ7)~c9TX}OU+M7yH9_Zj3#kQs3qQsAB#z`QkEsfoXeXbr+(WVPgpYLWXU+cc*G5pp zjmfJ>noDu*N8{_(u;+S#N}drB%pB1eJn1l0tl7NiBJb9pj1;F{OftR!yXIlEURTcQ-;i)m-Fh7-7e?m3IiaJ6kV8m`3X#}+oSQo@ z473wJNP+c`)-n)bC&}L6mhGcx$>pG3>X%+51h!*Q9qrzy@i*U{8mRD|nEO@RyezShQsXBO^_Iyj_Hknmdr!$I z=p3AoECeYj;g2c2$O#N)A}$r2E+jKNGX-_z!m$=-^&yA=W$ z5~2G7E=U(LxanJGZ=4r~GML5l0E2NC`ZblZx3(%04&Ph3D$h6Be{ZgreRT;~5*QK7Ob^rQD}glr!_^<#fPD6_gI$ zClT=ld*T}EUG}sK{d(|tF;qF}i+pmRr|YeNjlrWi=+b{`rOgnDf9K`Ex9{ zL(Nr4Aj7bv7yM7AY_>c=N6_rM`4@RmUb=^>C!G4-$b5POI>>6*BGxv1w^w}CE+w|b z>Sa&D@a!QqF8>rCd2Y;wFhuga)pdlgn}t?A2df978^+#*8e?8RVSP^`t!#mE8*ac= z{EpgQjmkW+{fnJrI=5;8TvfFwn^cI9#pkbw%Sn5$N&~A=E6Oe99))lkyf2KYT0s3W zH40ihe93mePUs4xou2v-JB^=gKlBc~0({Gh2`gH-Fo!hw*;mjMy-Qwf1qmY3i1iyO zs*(|yXz7@)8eeu*pB;yps=ao%GwQ+@2SZV5Q#GfOw!|6lNe%TnfjmT&Wo}py48+T~ z;rrYyW<{_8JHqC<^eW1r*(VCme?ejs;gt|o;EZf^O=awH0!)V|WR>htfwp{5={lj& z=ZQ2GXmQZjlfb5Fm7*Pl#eba!+pe+1eY+iszIvdme=V}yeMQ0F2N7`B0+WNEGU%zjdEWpQcBX{aN1 
zTdC363P{Os`I=^uV+umLbl}nS?OOk|2D~89`oE9w8OO|y{nTG8MfxG~n;5OKdodmF zZ6z+G2nS!Y?Hq?9)v^MlUJLTD*&KY5v=^<>K;7|T^e-a(Vu*9-Fti#jbpaa`tW{^- za1}qhuV<)o%&eRVT0A(!N?tL=@o6x7Wm|wg-F``W<(*+znHPH+k}a(>e%)wIg4z37uyhjS$lJy1*yHC;nKa{HPiI_ zs4VMNav?`^`ji@^ zk~~wO|7*%2S44hO6pH?xIMV73belzSnbW4}>`yk+8t5jpz!Nuwf{HQ+n8(T1^n zJSkB=Zv7#6(^8h!2oFPg_xDTkpx(P8AoU}XE0nhkZ_<}A7Oeta_fGqe1a)t~kKTUL zj%A5u_6Q!+ZFL#LGZ+!W&Y6jy53EY+NpkAMwl|KSb^t2a(1wm|Sz_Z5e%7RvUG8xW zq?{0={Aql4Sokc`hHKSn9IqduVAou{w2m~lNQLCF=f|P~$f%LD{#6WVUj^|deQws2ET!b)wQ;*4sgUFtg~n}gUgba+e_w%?!zeA*sBd@GzvWPAq^$M+ zD2|oxL-N(Zx5Y{*X5F(r5-(B~9Q=cM|mq8=0-2gX%(KqWzvcSd;k`AAI$43PbR4U{)VSEfAmj z(Iuo`$!>y*d8%e~e85)vzTm(@(Tpo+*!&!D6%`R_q5W4PHoSzw# zJ>xy9AvnPeEwgOx7rrC;fe=x!$^eV5>78~NR-M@Oc6cM)wX8K|LcGPd4tB7=Z{&Ni zMQ}-noQ=C83EoKLHQ6x{q>Cv=dTEMjqb;AZ8j3wA2R!&JixyXXS|KVbn2*dfD|r(F zm)q&I?3+uGjn?zud~qevwCR3qf zvLnVU^+8aScSu?=sX4;jDiufC;j~Bye9i*WYQPJIw~1mh>D^78n#kDuLM-yG>(v3} z@-vukqR3UYW_t_Tl79qy%E^YS7*Eu?D#(h*Uoj{FAD$Uz87FXLO#7047lzTZM}%G_ zM|99wL1g&Vka36}vSwM2YjQowl(o|!&4^d%f5Lt)w+XI?|)oOAk6 zFCMG=nCz@nO?DS^bPW4ciDAj+@078o%&3v9pYe{#Lzr0Zs-n%D-||A)+l_kva;@_? 
zuIjCdX+2uu*4#g6amShe=+-PQ)a3jOGRk|0x@nOY^>swYiE$0O#S1akhcdb}g1dU& z4~^JfU@&h|pBDP8Tt|`0Zj>kXRaus87M(hk|0Aj|h~4l$Sr3`3ylRbeiT=B8Q}s2B zn?_dhLP2V?+C+wu5Sh7#YG;e`Yr2}m?D7x9B4x zD&x!6#M(q9RG7B6`FiPL(1XxKV*O1Z*@57v<-7hahtkK68+}{s+C>9C?a4!t$&7s+S zwVl(EUdksc<0x_n8E;6jJUswi)=tOnsMEj-6g5~^Fw`D5?lF*yJYVBW(YB^n*k|qQ zb2TX$vy_Rr&7DlD9J1qbHI`4(c$kg=KK1GPOUXlX$*oh1!8Ip?yB^KAQV+!TZHua` zY>afdE&bDqMDv3e;Fo3ONkY{K`*2&v^lkTSu1*<7c{^xKuXV={S&SLWz8~sQ)$vBt zOK_VW&;HwuqiAw)nVEU*7;cuU6Ha z_Bz|4uQ6tbB*!~BojF|7`GqC>2ww0RL2iudM62beSlvGfw^GFZHMP(v(gWq%Mxc_8 zOGYXs>(dFBLYv*Rk($gVv>!~%X<^6#+cQ8H$kTph4LNv?vEDBQ4%e3pg@sBUx2cO3 z*hc-851d{s6_n`xu}0M7oA9+(@^z zF@7jtIBVp}v_&DM-qyQ%-bkZKZPyaeU**u3Luboi3I)&$DIOW{}@5lw@F_(`F zZ9_B8n73@?RJcu@2Z9@%i0y9j;ra-UCm@MDk|Jw~vdp}NBt2yos*(JdcV{)6aJ>wj zDz3=_Gx7Js2uA0ysug0;CumMZ0L_kYL6BT4w&5YtSje-4bfKeJOVkLeH)C@)U1GCW zqE>GTgTyo)zS)!0H)Zt{#`Y8+1uZ83iY#w5?!i}8W+7b*Dexz??8!22(p~xUtD_?z zRjCjf&$L_unW3RXQc~zGXh{eV%4PPY*amH|hDF}w?P>VP2hZA4 zO6>?S^8JWz7!#LaUHD38Td~Tyr@a`z)2EQHPdLuk@?pE#k_&8t8GOY<#jSxvsd&v< zdgP*>m`qR3q6MWThdgK-DuYka{h_U&?nv`8tlu~<_ZYQ_XEN^- zyS1rEkCq_{2kEO72z@KXHk-uCp;k47)jI>R#=|8Y)voQfI+PACBbon*Rn9(_l*#YO z4=9pTJ;gun1K%y-Ct^fUQ7%VMbyMh_N-;>1%HU;4ks|)w>0fWl31U`h3MMmTfej$) z=rbam8?(#_4x3HJwk;+gl>{PX{Zl~NK&E)2lFG#_9JtPo_^vC4bw7ux%|I8xV+=YA z&68tnKarZb>v#d^N7`Z{6Um$ry$-^`>TLWbzw%_UO-D={y1YYQ1lZa$cn_SCWjhH? 
z(NPFpqdb%*UD6buVmmwr1!@TuH^JBbTk@vaXY19gSO74oBc%|e`%w`eElw=Jumptc zT`1yYbYdtvkrtruq0sd%R-xd&*7V$Md7nyP;M)4jqh|vm%*UGI>#wcfra232#(BW# z2%{$H9GP+keR=`;2ucNTI$eY}lkUfI$t79ZWo;sPVee#I-qopG+X}r#F1e8T&5aQ; zUueS1+`NsYaFC|KBtE%Bg}@&3aspweGnNCR3?7|)Smu~Gthzk zEX5rtUVSR1nb4gfm9lhUdimo zcK0)l%SSs%@z|G~+!|fcVHWw!1!ks5DOt#IZmOXXShzjRiTaVlbS9vPqK;8$+o!_u4JZy8yTW!#gLtlp4Gkk2p zl$GsyfYm^S{_-*D8k{y0*^twl%kJ9_h^C+;_QrAHKd6+%#5;M$T(1yT@jEKun`;X1 zfEugX&?y>4R&Yrw8P$xizDPY!!s>AmNwEm7dl4&pq3H`T1#hweVMV4v7U2a{v;=rq z?|Y=O@Vyk{-%g^)mba65I8b}LhOx`tPp5!du z+o1#g)8<@I5Z7f0^52+W&w7a>MsuTiwQ?g3Dd1C0=dH?4_K0Q$N6pz6a1~8HzQGw$oq2s` zxnz6OA}Qv|m9ZL?!qVugPm%VCJR;smtjrh{2wFJ|!=c!BxF(E#q7e#^WH^VQ^2_R4 zT_6O^oRt-!MMdku!t0T{>~ByB1gFDf)bh75gTS(MNF>r<7yU!A;N_)ZACY94{Kd5h zFK`#C^6ll?f3pTagX0X)ICDguXWxakK!w^)>T>?Yib$Oo`Kk+U%DH_P3X(w+j(z&x z_X!T51pztRJyAKf??Or-ASnyl_kC*?6#>Xo@M;oLKHhyO+6PEIAFav1oqF(I(7$(0 zIwD$me@|Q`9IE74!paPHu2p1$xsh*3{FhI`h5t?0&a9&T&zPcta@%eYeqcMoS zKiPKME)w(xOn*N%+qW(lCke)?1;#3rN4U?*jgWHV(z(CA%g9?PI>35Q53(St<6Y~C zE&{{X{{Al!hQs4fNNtYLe7?`5vq1CFY3hg-5${gVZr`up-y1<5wFCy{a?E$%7!nF# zr#V=Y?z6cDq%Sw^7uP@Qh1~&nAD2(P_Sc@_SDj$@Q#``^JHe*m?mbn?`@0KV;B|^# zRPGz-|8gE^-ek4?dAo0*`EYkTCRMD~W7i2x;jQIvf5lFNkvtm6B_V`na|ChM0P-0i zD5-EpI*)%s$PR4fYivLdy@4haSc2|uEJAJ5a_~k3 zxf0&zNP#eW2;XD-sJg5(g1D7vLi3Nn(h!(3>D?@fOxfMSQusur9@MQnNPIxSg%ocPO!r!>)Dr8w zP*pHQ=8nfk#4xk@tPSU+FjmOt3m}%-3QY5{(NE_C*|5qDU z2K1omQ=Xkdw7=y4-|beEPEh>H(Q0jZ{z3RTcILTnM)%FKYkW6R#V$QCmtXOzz2IYv z{6y5ot9@B(tsuP4teUngUNx8mJTAi0_Z7Ra(Nv%tY7kbiw=|c&pNF{F8C_#upDhXhS}AFMUJ)dg`@%PyQcf8xs-$ literal 0 HcmV?d00001 diff --git a/vignettes/R/RDD/trees3.png b/vignettes/R/RDD/trees3.png new file mode 100644 index 0000000000000000000000000000000000000000..ededa55f50ec7b977f00a93ae447b6388b585706 GIT binary patch literal 25988 zcmb5W1yq$!_XY|_B&87)q#FdJq(i#9Q<3g&5CrL#knRR)kVZlY>F$sY=@z_m@b`T` z{r-2|wXO@~aLzmLJF{nI@BKXc8G;q$#8Hv)kzrt9P$eZqlwe@)QNX~!iXy^;cL@1t zN5C&H&4h&&B!z`Z6zpw`%`A;zU}&PPq6DNnUSfPnACoF{Rwx|#Oqz^XGdAVzQ+xXk 
zEtBqcdZb#`pRFYa(+=awPnfqOpNC)kgx7>4bbR=tfCP=Ov^Yvsx~Wcew#enl`S<0t zb3U|#cQ{`j2>4W*Y1hB2{?t-I3y*(5`j$kDl*asBt|6B+`NfasxkcYH9NOdKOG?HarY66iN!l1PUnPG@p}n2CwNR+wqg2X;{TcM2DM#0lEz9QI83=wvrNrP%7cG#ePQzRhyVTx9pn08LX0lZB$!jG|nE<7)A} zsT<1_dqXTJ&KHb z|9K573cc`!vaqBi_^JHb-pI(>!PLg_;oyBwaH|P36%9uXSsCuvHdc(U3~daI7+tMw zp-o_TUAe(aD&HjCo zwZlKd0s~}%e!|4e_>Ael_XbUQq3?1ln7JBRYKoXy0rP-o@Uyb8^WI(me?Iy5iGQ@z z__rnJGqyil{_)BGwp4X6vKO|o0*`d$|958oY5eDh|1{)ff{y$TOZ+zT-Mhff{K&ja z|D7{_AF$t)aX>VHB-JOL7E#>70 z9o5gv_=!LK*hb#>((j*W`l!-Jd0z+y7US`s-am z7&N3A7(|~xzkJN#mZClc5Iyw!^IaIwsTTx)j|$^iIOzZAP`1JUjDVBE^xpq;DG$>} z*Z|*M<-y-0fC+@N`n#ioZET?bGQyD|FaG{uDJlc?ua3eXeh`B}3pU$SWd3^upi|X< z+YScy|Cdo`c_?bk$5~K67Ca2?l~Jkhu%ag0VW`|)o&NbDM{hlC&}F~5dy{(02rxYPzKU3l7#!ZfrwoV~T$^Byyro|-yP z&wY#1eIwO{Ov$@kh`dz0yn+05_bc(69FHIMfqGu2svSjBbvDGZNo>LE-x6PIB$DA& zyPull^Ef{()u<4Hvm?#+L&`u8_9j+RQ4xvcJwoI=XuDTy?zSl$#=7zI@_6fP)+XN% z0UckxAcOKVDf4IBIc9%+J&!$hDG7;;NdBuJ@2jnRg(`8jn-9#dH;)Ge@~=*J5K6@( z7g9`qUK~2!UT%!;l(iC7|CHbj%dw7*QkPELNb_LK&C64gl#~qbshKHp+L`G5C6~&r z`Z`ffK10=HB3r-J+i`P^y1dYRry z4|VzwWr7Zt{jG$tMx8a`>5utjb9`>EUpMDwv$a;F!ACdei^`*UvW0Jc-JUG|y5-1e z5~G+XN+wFiCrw{qX}Me{z<>5qFnn5gI8?my0-<4(G}d|BzTOnH8LKjpA07|*`4;2i zbW(q-3s3)nrWVf$Y35HpC78tdK#>)j(yZjfbgdKQno{lx^yzws{GaV{`kXv`V6zt7 zKbT!#Suv2!eYe6#fB%ShoKd%drY`=sXIs{GYGr~L)K z*rx$GsH$}eR=om-$H&L0(d@B8u5iEtvCjJovZ>roMa;!&%^K-8tv71UNu``p?e3;) zgi<{76|WI$EG8qcu(8RX8&!yrlln3sn(he;nWd+_+#L!uz42Hrtkd)gI&x94`1PpB znhXr1TDAC`^k=eQUkSw8BXhH~IX=5N2)v9|__(9j>>Swp8XJv10Lg7*IMN9@#qOZBgO$u#^ z+m%P~o@$Wr@Vgk-3T%EIBGs$Cy2S#$W_KN@P9bY+#(A&vc{Zb7oO(HDYd7SSXy3&n$GsWVTI^rT z5#iyv;RGD`n(sbVy51-zKesX+D^NVzMCG9ngP*uQ>SM~+wrN&PqaQ${YTW_>i2-gX zaH<%6dHeq>FWfy3y_BzMKX<3A)`}*}^qGErQoZ); zb2x0h{=rih#H(Iu_x;OQjwH6roYSPPxTdD&5062VA0=-t*YLK7GW|>(J3k5!hcEgN z3V6>kgqF!Ber-NqknT@ncgSBxaTc+zU&D1l=pv_lg4fd)(G4bDyuQ?)&$24nre(UU z*LH?(XE3!K>r=!=`WwA^_Ox&*pBn;WO>Pdo z-Q7{5ORaw97N_l(+7@g46e+dlUHpd2?e8iH-S+5t-WnMh6)}mQo{`Sh+2kJ&@I*3x 
zT7tNE^nClKl2mrgpDQ;$@uJeS^W^$C{nu{Hf^(ow%Skp(4(~{$O!9Jys|1^T%tw!Yf;s6h$cKl;(Rf0tbxY*-HrSb%Y_E4S zCNHQ#G)gQ>SG&%7{Dk90u{GH#sn%?f>NSQLy@zh=Vf7j5NT}|lvOM3KHB(0bs)HAZ zVIPyOWL@Bsbzk%@6Ox=ojYUjwu9R_a47<$+lGCLm&`App+_(f1CUysb$eI-0%(@#t4Q?3rb4gb0m z5ywwUMJ48}m%ymI9l(fTe!tJMGZo3u#YwoH0^1&-+j9PxxoHLX+ZKFvkcw44&1T>j?^FkGc zN_DH<4LSEWdP{%EGq_iUQ{-!Y6t=h5<#m#l9?HT?=v%xuNMhx-lY)j~NR4x=dc4?1liZ||qUvsz3%b>pDZdz!~CB4y2R zYEiKj_Ly~b@5Ysakd*DRs}LSkOgQ70!|;}) z!qV5u2NbV)n0@6J30?Sx;Dx9>L_{_{P%|UbSiL|?PTS}rJF^e61CzBxtYc(eGm)(Z zDii-M33>U1!Nn!Q!mO<@5iH-zFBy5ycjfR=KCOf@Qny=i2rn2QN=o|w{%cyyk2BN+i<#=!H1p- z!}G_3N4|eU5FbG}q&CuE5*-}WzhMMM3e1z3CC>w#ztIiJTL5K5Fc?Gr2TCEz!JtXn z^1C8Kk`NDqixL09Dj9(=R~UR_QD5aiv#Fk>q5FT`4h}#lk!OA{bpAp+<=6m&Iag9S z!u|z0J|ix@6#4uB7u8Ob&$L^no}Qkb_WASY`0Izo9Wu=~{z%M1hSS0-4}0!Y_?#6E znr7LvDqEkB4wze9RoA#MhDA-ywhS>!!F{tA%gP}t`hD{t64(<`O#jfVcO;-Ih^#Lp z{_YAj=*n3&eKq8-u8;!ghS@HIf%VsDb|nFJ;@d+m&60r$)DnHnS#%HDiU3oP*ns@C z&_i8dgU$8;ng79YVi*9&!HLGg{yhO1p#XI0PDLU8^$fu_M1bYOGJ;tCgreZJBW9cA zUPJjb&-=vu@c`ionR;DDJi8lXIM!1%b^T}~baj{2I)2z{d9UYKamk7H+30p^+N1fU zwL0RFM7h<~Y*xVxvB=~UxGeA@m@-?wd#7jKP7Gv!W{YAg4}Gnn1}yi`$C zTu$s9nS4ZAQSJ5(bKqN@$g=6vqPgYGZ*HQ{mDf>2aHO3MGn3Cx5Zo)PQh%yh$eqyb z9{SXy=6-u?Z-0MoSlENsw|VmEa_IaQ8Tw5R$)5=sh!zV{{WTFof-k>>JuWJ|y*+O& z{-Vu{g%}Fq7baSAN@&8aTC6tAP&bD~QCCCMr78VH;@;nrj1dH5 zv%%wbWH?jrK)trQiXq>>a?nd(*8K~zR%OpCCQ)KZ*bViFqZH?TBqqK(RG zea?z^cP-rv4fkeh+MlXT77^xIno+)U{A+Du7(qluMcItMJ(7}^&c3-m$F|w9sPm!K zcCMm1`O{A2_kBUYRJ=ah+geYy$(yb+nH_ojN_cv(t5WpX59`mH%2BqVOzH!)u2pG> zWHsNQN<>(}iUKz0bi)vXIh<{Q>XO)WPxk}GYs zANK5NSd{;`VbtTl_C^?Kn@K3|$&l~~!0O7z#(B)|kQI=hV8MpLVgAW{>`IdiO_QCC zPieEtOdqZL^8B^Zg79G*(-^Rj1;HbPP|W^pDA+~iz!Bkm#EhebCP}~jLfE)> z&%QorhlBh>q06VMkf$D>8pvBukutottD+0$zQq!F1y#-9TkN>ic7ID`wQ5W)Q|mM6 zjaO5WK!ChJv=tBd5B=nh2%NjH>>RHg7kC89t6nvl>L}d5M(BuPJ2Op|Nz#re`F}nW zqFCyz5*2#kj~Sz2O3LsTf8#IMR%jCH@6$blK(3~$I()s~Dh~ioR-SC~2uOzt!Q^Fb z23C_`JcM}m{_uglm!*V92PSw(48|+yxtxR%KbVU3SrRNP&Kt{Ug%4WJy=q2Al=E&I zsg>kMW6JW1Vq);Rj>DoS04To8A|82r7iBo3fcHnhC#3W9d55@U89fyCN;7a3umSmL 
zd>*|vso2=qTQe4AxnL)z{`mAAjoa(W(Nv3!{jTpm*}xulvGtix5j8?!(i!XzyYNN< zq=llg_?Z5pc8v+r=}&gqyh8RD2d_99OG=*7J$;%31x4R&|Km$r07kpO`pk#uBbbA9 zFip1WQ=(lLakjrGzqPd${c+^^nzpmz^~K@$LWhlku@X&Ij@|dv^1YL%+v5&jmm&Wc z-xDxHhm>zUFkpNou$HPs85?(5+)u~Vx4=#fGgPg8s$KAYrmTJ)ABr|N!Dz-k>1q?H z%R*)hM2rBbwOorGeTu7jX1f>gbC?>#ocJw>Phwg2*s9iEYDEK-l?!T7sdyj3l!Uk3 zURkUZYMGgtjoaGgCi)oVTV9$U9DXGp1#Hq5xc$U#*hRcA=n6Enm|zJKugEkCTVMoH zz^++hetf+(l4~p0?qfAgtDVM8$m<$KiQjR2rUt~+*5#|*}G6zlp0)!L;jMB9#*QhUE(b(7+kt9*=2OL zs9tW-YUk$Wo?Lv797SPjT5=WxpRBS|G_Z?_0q+RQKZXnoC)lP1CQ_r1D2nPCSc$&V z8e2%|3?oIV^dKZ9@LC`Y1ldhg?8Y(>-G2-iJt*IDvX-FVi+}t=eK&j{nPZR>Li0P2 zwmCy#`#_EYBAg6xMHAHWX)+*9cWR~JP$fxLwbl$YygJ!pdvoh~nNEESAe(}@d65au z$AA1lTPRr4Oiw;RH;1IpAntNUJ&)5iMLGi(p2>9j05^NJ*z>+JQY5_hkB?D0;-+_h zX0d=Ygvgrs*-IbVJXYm>+lSDV;3WR9XHKN$xWcfgWlv$q8f0|0|J*++jOzAhWV4iP z16C{5t~20Sq|ulj37yH#5GaCy6-GSSUtEl`S3vx|V~)X&sfg&es}16+Z9@#^no}(9 zM?6pvfc)bOHc5T>MbO#5A1_&Aeor)Kz(fnab3_07=a)Q?G?P%kBp{fCet7vxW|{?- z`!A0DSvt%BF7qP%Q`je17E`cFf6~Vy(f@4IK9kTee0I7J_G~(Rkga$IFA>2xVzkjV za%YgD{O+m@;3*vmAYfZ>)bud8y`$BRXG8J-Gf^B8hA|1<%rv$p;-pwV65spR&M**o%(iJVc;7I4 zW({F1qoo40KODWF0Bl-B-23<6RiOsmtC>{FmrDf(T3I>3odNK3m4^&2?Ym_C%@lAm90u8k@y5D%m8FDw9DfPVyNy zElSEc@nQ{#{h#v(N6|)3?lNbG@DLBLaTEs^cSQf!mHLVkIq<12U%v}Ig*+m~?9=-* zCOwf{0+i*Ey%-K6AYiJU_q5h<(;-+s!ekl4BrN|dJMaw7@HTmYn^m?r!fd7|SYJQj zhy0EZ(8v3Hf}@i8AP6{cZ)6GziXpIZf8a$T+CkHOJIjacl7572IP9d3Du#J;o)=V; zHe%1fE;@bq|F(;)timUw#y;vMUyQPLe*M~~TwnfS{(XoB$WNg?pU8syL@xZFk_#Wy zVA)?RBs^0w2Q>BcpPAoSeDLh%1oJPZ$+a-K{yI{&Kjluh%gTA3fZ+kE#_ z#cNtFuIbtbDH)HRU{Py4<`zqVk(&tc128U+tea%E+up@|{Djcn#) zH10R2lUpEw9~JJVgqEdyooW7pV36Yg%p#PC(*B2uy^v@kBqE9U3@JIR=V*Ay(s)dP z)2&a%Xc5Cm2ctc-LPY)7;|Q8ULdc=sb*9P$9b}~!Tx3G@h|7LhK`2)^=znEI01r-- zXbN`(q9;tgEoXguN`mQcHJk;KmwcL0#7Fq z#3{_D-W4ID1uLw-E&q43lEA}uVRE^j(13k_PEKa%ARf_&f+#j9!3AZCnII%!lJd-|}359#VWOvdtA+nrl61IPrgGQ*Jq9SI03fFsF zMjZ(-uH1!Y4{d2diPxH7NEIb!B#-{MIQ{q9$pmQ*KAS0q)pX_kfuPfVV_GC&>)t0A z@c-JnlO%(aY;U%X`nJyQyBH_v@PNHiJ84CVBnieJcT5lz6}{h|$SUqEG=(|c`lhwX 
zn1T_F9=61S-9j2Zuyf|j*YPdJ=3;9rNsN%pZ_inbiIEFN2wjNo0D_Pr^h>hT-5j9%U*gxc#l5&3V4b0Ko{QkFX7p* zIs$qkB<$@gPanSZZ^r_WuRPCh>E9+8+_27qKbp_=q-15|SdXcRUv@VdSCEg?Sg322 zzk~$~eCT_mfSj$EkV)8+Ow59{7$H;5PO1K57ORh0%kY?SiHY{dw2k-RA3U4G)k0=d zwqZEBFHK&#`}w4u)dK^Huf!GW6KP!lVJ#Yp+e1#=f@?ipNeQL)L55A$*|Tjt^DTk! zM+dnNX3W6_DF<{G9tmX>w2LzBG2}K7uZ`v|;*4y+Hpy3dq{NAQ2d&T{@2O;uJ{7m{ zx*g?!so~>m=EhPd!MG0r0posm!Ci9t7Bk8dQwk8klqH->FX$t1Msm)APb?E=q}cfH zf&vBvfHGx` z4l+b?iXwDOBnT!2Od!QuTg7Sz#1AzY^IZOWrDo?>6-CHYlEYRzOM}&PPek7Ogd>VH zqHj%o7Z?64hX4AHY~Oii}y(<;wtK?+a2;OG;5I)00)#vAYjyZZ3`hF7=`H z9xQhRx5C{~OG|9fAY;B|#;v%X6`nAL%1O4u_B^CzQy2&90?xM^vwL754;nE-9@5jW zva;Gg%rwM6eDAYtArTT9YPQlD46SLcFtr2pw$*a|14UWpW}3P6nWOVv#_iqsaLY7JQ9InmyB47eL$?VazJ;@OICE^Jovrw(g zq8ue9W#v7nP66ahCvy&i!vGtSlkpKi6p*tT%F>pW(ESG*KFQAhEM`T0hd(EwC}AUrDXgU<*jKN|N(K>h1Q zlY^9+T6~l1HdIFd6b#=+AL8IZ!B*J!ykzT`d@H`I&1^@I;3U6AJVp`z%1Flw_S(GA0vBMkMW!7>i%q|s zca5AiF|(CAU_HUyUoW_n^XC`oJS&`r!IYt)<4{~PRz6Ca&rkQ((rZeKkH?L1x}zk8 zSz_*C?Kn%|Ja2j#A}+|n!V*hUHVAOP5-4~#tz^0ReZl9pQcMh{bJ@f_8X&D)AnO^3 z=>7w>SSj0Sid8%!*3BY*89}2i-2$fYq?(_{oPaCuD@eg7l&XQJe zOR!B0F;M*3ak%M+;)Y+hq}&@#2zz7mOi4NYd=U3WL*l`*GqtA=opigA0*q@pU=L93Z+<26fkNpKPI4W?sP2KWuNL0{@*GZ_4~+aCw#!7AV( zas~+&cp>7J@f6r`im1+q&JeaEn1f7^GfXhr@k95w`P}LQ1+M-*wPbgW_H@ZWcGvQ$77gXNKJTd~Gc?L6Q zj=9vebG$J)S>vk9X+&C{4LI@Nbd8S>6RftY8p3MSXpJwdembaVIy$Q}N&8|eJUStT;G>c? 
zA|1RKh4O}C;rKnX*I&Apbt?(b7%hd{Mj9;-(`JAKq59=#$o6CzF-Yu~3=gJ!NevDE zoUPdh)6v)1ULp^yMVo8r=qh)$oE)XNr5mSR=AG~;frRI5 zZ#LBceG>^nfYa}!{p3`uH)vvmCGHXUygMJNc8pB<>S<=lm?IQyM;N zR(bQ%#r)Z(i%Rt#p1^$rWpm}!ybcW@6Csi1Q_K!(85#Qvy)I{SbigyHXm5ULX6oi_6sEz{xF(4;{;DiHtigQRk{ndw{0Mrs)R0EdAqqVln^R*`AH;Wm7mnMA|LO3 zY>dF?unx0&^Nh@GI8l*(}n$Q5knDwywu~^;)nRw}{w7vw{Az{NyF^Mxl6d+SP08TbcmloL0TA zvz~|Q!@LipLxO4-#I{K*cg5_>g*9slNGtplGwuN(WeYY!rEpRfd!4q`_I&pT0V6Qq zd*6<=92X2%VTxvpv1?M;-p=$t31lbyEbEsK+z~#~+?!uFMQA;Mykd{{F(i3O4Eb`h zg1N4!r6mQp3Cxf8yZn5-Azs&K@A7@+@B~4ZK$b5=?|BiOywT35{M#Gnds@hG?O-i__`HmL*3x`DwYC6Gk zz|V-3dW8&7;2+&(ksURK|5SUbw7sQ)TB%L!HEL>B!8egO9!C_n4Gc{i{b`O3b)_;U z6ho6$h#AxE%|?pwWnp1w@>z@BE5e2fRmiyGr-l_z8Xi42M(&Mc{%uD~$wRZ#X3`Zj zI2FW;TKPDNASF@~XiQaOej%ZF2hvF&_rM0f`%t+C!y_mPymfF}wJxP&&ruxPa*aAo zUQtL62UT^MsI>b^%XOh_Dw_8|&_;whFf0N34j2t`=)iFGKcqw33>@IJtgb(hr54yc z0Ug&N4=t%H+a0vz7sYdG9YF8FG)rg>JzB^%9`UpQmZ^AGyz1EW{UHD zTqF_M3D$vCx_h6U7C*G!To_@C2(-a}35y{D087rAXT^)tvBHKRU2|hAE?d^*>*s)iz)5 z_Rt@xFPken>!$y4-fB2nIIg|5l!8u6$`GeD|4ZC$CPAHS?q5%b)$#BxxsLgJ5B`fJ@3d?!(N0J+! zQ!j6P=jVC!C~Nq>dW~EzUrMjBQ?#(0^B-j{MraR5&8wr$`T3|pOw+X_-SV;Hk_+rT zM$vEN?xYfq($hQ$Pp~{@)f1evyg!S^X(vSUvfLj@26_B7O&}V7Yt%@7*hF>TL%bc2 zk;`ZoHKq3i2t6B~Iga}oTR9GC_Hi7ivfT9sZcyt!`Lnn~ z)~9C$tHTUpiz$LP99V#0K^NORYWZlviL+>WUFYeQ|ePVW{Cdb5xO8rFI4vd ziw7O$PL+}q<6Xtc9Azm4=sRBU9FUW2-#oxD@K0LMW^9%xNyOSw5$+5DEUqtxk zmDjyt9vII;q2oRJ{~T{JZC#w)^ct-?SO3TS-geg8+VIHiW4$*DY2I;DBX58b?EyF0 z`bOOFW_Xu3*SVfzIfND+_R5XSyE54?kJY}yDTI@&z%;PkUMs!+DzC}A1EnwNQUMr;K~C!EAr4w zJ{35y^v5<l~-CTG1*SDrh-kCexePHcZQo2v3FX3=f$qpEVER+MoP8#aU=KGGdA`6KKoj55oCGJ5C8RtRAPw=p;h@di^%}7^g+Sd#?((RGl zP~z2hD0m~FEW3|KTo`1!W0gZh)@MzDz!Q5vuoF#E$189*Vfi9yTs|p zX50x11G_5;dOo8WeFpi#L)3{EV@#h4>=9TNKhCD*k?(uIP^C1okNM!wDfMRix%tf! 
zP1)1vvE)I_SNR5?eeJbKxX50!Vwozu=iDBD7V-88(+Z`2d9p+=QHdV!J*bG6zeeFm zo*ZhB{?o#DAR8O?y@x=<#ihAdv1~UyF)MLjBz|53&6x%TEts&xv>ffXyXg1iWvQ$^ zMYT2&k;K-fj=oxbBbCNu__&mVhqO3Cb*O|dcZ7?K5t(2@L~3PQTMj@c7)P)N%D`iKg_(%5D@^P$=bB1pp_d-2A90 zD&#~b_V&O{GN1%Oal4)RE)@C*z%C599If_rW|IRZ*{%#z6V$#-b?Re)5PdZriJ4>x z1CZ->L7AC(r${(f*YBP?HtRZzu?VDz5w6SRfY4gG#LLn?nV5c?Ehg%UvwGT8g|h60 zEkK?cKd|%S482gis&OK3Av{bo<%c%r2LmM zhk2!VIX93el462v0V#o}qF^p8Hd+2vD=6=nfm*%eTAX^rw8mk(M00j&dUvIGa@_ci zpRj=wN+L;mfxc~VPcxtg9BqGK37}B1SxBS`xYA{iw?B3z&|4stwiAmLN}`&`x~HD! z&LD)I!%;?gxjAXCML}qKVX`II;T>Lj?#vjq8`5$C2g?eT^<*>AageXBzuN&ICcCWl zR!fgQCO$q7-2lhoai$AS*J+WI;dC?3=WsS+Xqwk%3yBk@bi6nnRP9?EjuFfzlM2`p zh`-<>|GCT2qxgziSbAw%cTzeKzh%BEf-)78ftv&sx|oB6d|U?cSqu@`EvKU58Fi(| zILWih-=&;Uzid{|J2OE;B@`r~Voo#IrszRGvVIAR<_P?N#V8rhjA|Cv;-6BrmZ+h2GLYqTm$+q-MpSaEl>f*jID^8Y!On zbhiO(5TR-PJGR;CYd!17?9q8f;W2V*zW{+0cX>fcJ5nC_FkjDY`>h3(JGVvl6*LLu z6nz7@3^B~MScYtgtS92w3Ot23Lwt23HsJ)yb|cfNrBdI&n>pkU;IJMkCC1q+xjSs( zoCdnYev+5cU!Z$3?^16jj;(pDoRnR)T*mBr(Nt!Q$`;kefV2l>!0Uoq`l26qNL1;^IU>aVHDt?rc44PcbvP`1&Z7iz}SJX3HIbw7lL^Qy#xIYADgeuMd!hmR;aKsjlu4wW%vj8)RyikVYWa$TAa2sY(|m2XQzVOtSU0^^ z13EEK4GaNMmYV)YY?Y`;b;1Y>sCf*-rkg+$Rq4A#l^4r?L(c8}nd~og)wtN66oVZK zrN+eXn<=e2-@o>k1W9ZXGU$rL^J#@CM7lxmUV#@(ApdoHs)GDbLgLjc((yv&K&bWs zs*wZ=&m<0;w4a5M{*UTKDifF#qm9nTySC14B~lh3DkpfSDAv_;&|toXSj4HiDH@y_ z!+)6`z^k;P8sy6%wquoH%8pA}xyw!$2-kimrxc3*#Gq9j1Ss?Md>S5#MeplU{U$&R5odKqEDL^jUS3+!u5`Y5T(J;PINIDMv?2e!B<0*K0Hl-xgEn1D_An_f4 zVV`I`@DY zF;}z7_>y*}!N~$zrU|Wk?Jse5w8m7q+tR>!LxdJQs_heU3o^Rjfh=D@v1`k~?Q020 z1aKD18E>p_TDOLziH9@s61q1+*dB8UPMK+#yBB{Lk5L3qlgz;ARe(OtHl#D$NEHPX z#B%`E130Ionb+D~!G=uw2V{LeibF^DD*`&V7~0ma_PxMDns(`I_OdZrethW-;9r6B zHLl~~?fo&wF_KrQn90fL+t8U8>_j|x6?9jNZ7nGQ;_9HHqaZj~Y_JTNfh;Qw4OU&0 z@`igK`?Obwk&|rO5aUk!R7+KE+>ejXZjJ*ot`Dxg@mkY0wq&W1{;^A^2w7L$k0an@L<*1k#q0xN#e4Zbl zI-V*jG9hw8RWjm9^N*#Dtu^}ho5WP7pGVG>;Fob=^N;M9&7FngRh4>(~-8yRrG zRtk1KeJI+rU|R)T)-`fl#fQvKns6(l+oN3w3;EFUK}FZ1!tw0K48VUI2m87IR<)3c 
z_}~?`f@<$kYsKrYN^){&PofYj#41Q$vH|!mhArwAPodDNkGJyXAAfm92GrQ;o=1|G zf*th^8&Dz1lT%vtGcsf=L6R%DXGZi_-xRZ7-W6p&-`#ePi+ z>YjKRie2^>Y{kB@qnabY>cZJR%VM-V8=wFReNj38f7d# zSL+xf{1+4_5%Kn9eMAgm1z&lgD{)FZhJZ=w4M>WD@O*!rmc7Z%6b{6H&OjeyzeD~w zRp*Yn8?N}sSp&{6d{t9?KUSoFaFm^I2;amg!tjW}C3^K8+bvtV6kW50bO>ebiL}4I@Bc`Ypn}vZPjfDO!0GI6gLo%m@qVui%^t^#oo>5Sh z4Hg#)Llf(>s`iYglfNTfl+JK!bY!Nn#_iE!8f}k>W`?{Tlv;WD{fsQ*VvKE5E(IjO zJGM3R-j7%UCjL8y2y3n4bV7wh|EREqD58>+rGbJf6BM?G!J=9f-JS*|8apu|9Mm2# zgAb^DFtPl6XEHg9o9!+de#EJ0x_&{ivXrvg);>s7Pf42N?Hx8zCEcSdi6ulLn%^9y ziNO8EKW*dhmv2PKsVvI%mELqdSW3&WK&R!rX!Q+8@l~!u>z9csY#~`2_QcXh)Rntx zt4N7ql;DU-qzUJoE1MzE`5%!veN4qGKHnavr`r)~kJcupwOGAV+Sdh~mXhBGI{w!v z5?Mk%bvvg{VylKDNV`8v@}4=k&{_=$?1b5B5zT9mhFd@7)R3$1Qazbowof_|M$)~K zB|W{!@-dd&;*!~pl4xWqmfmf<2*5EbQj1?5bA^9mz9xBZ@@}8u!x}|%M+VS@jwLYZ zGe!o8Tt~GJQE*VPUtzrF%)+rla{X0=rH(FP%?kM#B1Ok%8+$hBs;*kqyQ0LMTE(}i z*225O{QV7DfDNe&U@SPQWipDByM~<@R?I3_eF{pEF69ReuSNq5=wgwJv9&`Da*W8+ zjVa%Y#z$Thy}Oem1BRSQDIJ_v-J7Zq*2`%DXT}S{0ub*bwZHvO=n_?^pg4c_aPM38 zTT}t1+ez(0(`!9*7JC@>hV64l&uRB8mUMQ4k>H(vo7PIP@gm6XNZ){{^B_0-dh2b9 zYr}1lik8=|<&9cmo@(}z1aL4g5M0WoMv0ruK%%T~-(*6iP%40%%K{EWl8q)CjS14E zt#&AnP+&W1&0T94zUw(gJe7*C>&2X_I~k}YOC(v*Gd{cR&2miM+ufh-YG%zG81muK z%-9wLALC^mLoN$Z*G21dB)0Q}s?qXT*V`xW)0W({zq3Dy#>XUuR&hFz4yx%FP($xA zfpcH-31xA=IDbITOv@|1S&bIAjcI_ur-NR?1;T|3vcl?BMFZB5P9R=>&qN-PTUUy< zx>91Ma63WR(R>`YcX0ZJ_=2XSto0L-Ld{g#Tn_|u=UEJ<4dI@A&!g_n)kId>*L}|T zH$QmnPb}UvMSK6Shdqq}AdF^ikvV!Hb^vpkR{e>qtb|Uf){!QeJ>2cx2_y`}vWzbW4_(83gFEgqpjS{+zhTZzydspbsdlgE4_M^sUGmijIB{ITg-7%*JT*0nq*+; z->DzB{#FeH)Lx~ZWsw3Rkp?jn>zY5wecV6uL~~%d7(6C zh|P$gGyk@pD>CF07v6M58&tBF1<;HjFEuARYd^%cj?)~qnH%5-Hi`#hgnv=Atc<9nZ4>ifs zKzW7rKlnXT5NH_+q-H{N8LF-4wHWa4ly*y^KnJbIx$Ds}&~YXACaaP>zn~~5?BeAM zG`_m#b&io2~kAFPClNEXZ|IUKdwc{a7M_&*%rnQf0h-+CG)kETA9AWpv;9a$1k zG1lz;)JZ0|bhu(^jo4FTS;Q>Izhn1fjcUH`W$zw+vF`qz++uGgEkm6hwtCPUmtD8U zaykZ$h<7tCY!IExaWiTy-<(#6=O61xHFj)hLgI-@m+!@ZzotPgB`KK+WX$g|ZM0pj z+V9N{3|)^byUr`UzIWSJqfE{PGHaE}{UB}1welDI8`&Kw=byfhGrCf&g%#so{*YEp 
zilt$aQ$H&Io{{4Ck&_s2EHtxa#PrKMR0Ff5>UHvFxlI$hBWukjSFh2G1}Rkl@Ilrq z3r98_Xj(M&DwzLr6}w?2guWmnLolrYD)4dNt@!G1C-%gc5h|J>$G9r%pecF={+<9} zaNjpQirC1M`|!-STGH;@8ieg4zyZh z5t;Xdx6K$Pj<#ao54aW2Lrc9&D4{*7TLNDb@~!`$R?a+{sy1xnNEz~)qt`r3h@yU)OI#X0u`#Ph)chu`gCU=+;lx?&?YQ>M_tuJQF4vT~IsWY^)QJ`K};& ztAkyS*KKU(tJA+-+dFOa;yE-*aBt`8h&3jhD>;u%4)U4Q*4O_URbv;D@)i1Pek_1-^`@sqDrExm~Ls9#^rG?^i6(=NRP=$$Cy0EYIN-S z7DF#GK6YQBOSyhaci8A%Ge=&s{-8N#qt!D1CABXNqOPw8`%2FBHZMxq75NN%73js@ zx7S)!`QhetBIS7-c3!XNYRAig3TifKF0+*Qc;0}ecTd95>37|^a?0gh8&(fYc#ol; z_dVOQO^T5u5l&*Zzq>&)@*8~;bNi~2j-Y=*7H z9`=st0+Ec{&4iLEPd{)w;+}xK-;oIzj6n$A7uhr^O!zLA;wZj=S;y#fSjg}y*PfYM zk5pvw-U_h}1;z6%S-Mv`QAJUakxu=>A3S!r8;dfldVL)zO5>@goOxtupVSTJJExk- z@j9ivOEF}NT$gp2WqI3ihTmXg>N*$+KUY0odm*Rq_CeH@CfTIj!DZ#NtN)mxy9t^O zUEo_`!jI}2`#${Ajv%3MhH2klC%`Jx61We6G9hwCJ<3o5EFI|%#+kESNd zpQ6SDqMBSLWMy1h@3L2UEuLeMM?Y?09fAm-26)Aj2P^^P(z5&A)9d{^N++tNhBf`v z(>Ih)zJ70LRoEWa7K< zs9>8z$jv~0HDvov0hd3&4FC4@=%@K}9Z#QbzpL*vH&WLM+rb>b+!(**estH3lTdk+ z##D4G>89nA+js4&vTf>p@@ZHc4Bu?*YRtzkB|Z@vXgR@f=~3Ct%BK{8*SzxS=NNnG z93=}>U*MM;?FSXzLr+@YT;m0=7T^ zibUff9#VgKbrLhjK4o;!I;uS={UEjcM2d#=NV~1W%fb~%^7bQq8RG)|MCO{NucOYM zouV~|P31d4xOgl>+__YF5)d`-xHnf`(o6_^;GO}4SzrC~a~-Mi>%npgBexHE+pbKk zE+F2RSnxU+c!|j;gEz(~T_Nv|I-_PcDW^!=#dOa~83Xn)x4O8e+HM_^6>ASS#J=PO zunp!%5T;w{eXnXnY>%%sfl@br+r@!K%VeswSvQtH!2o*fY8us5d-a6VQ^hnda&TCZ zI%UfDH=k*EAF%7sa@;P&Z&DosuopYl!Vkn;J2Fu4%vND8c%MP0tlxL>3G{R{u5oVt z5s;?Z-TXAUIkm5zBX8*v!Bhvs!%mTaO!r zMMVkt#qz-_M`Itq#|I@?ikEZNaJ+(#b^={+IctL@-3cRSSuTak)xYMu|JfpQQ=dVO zTSi1CZu&(~L4=#MK=hrJwmYsLCZ6ERwKgmc+8vOvsa+q3t~iI)>9ueqb9G~sXn2U% zR+TenNC8tJ&)L7o3R^zY>2*GPJmZ|{kOQ332n819kvMNG@7q^wCRWShEh~LBU4dWl z)asd&E&`FcwN6H}LnXF>A$v1L{02L6wNsX_iSYG7LzkFXnX&iq6!#}H8-G);@-;=K zJD!(JnbI>YHydxX3fuP~<`3`{Xbj}6DdJcU*?i0OJ8H73$)Ypx^kyR^wbrw=im(QS zTd_Mg7vd>vKPxteBc_}KvaY!XnF{Me7hGdc_;+|1Nq@5fjt;NA(X+XAAqB&G!OH^2 zc*Yc?ACTkkBdI@s(U!l8*2g6(FG$unS3#WbgP`zi=>a5~DAu%n!v6AQ(*9O4>(UAw zV-ZM*y?@Lxchu`u-@o1{P>_Ar!qfU0CI&{`qc*p(vZa% 
zLTpAN)5eVhC$D7ml=`Cb$3N+=S5r!$@(t`}=pz)L`G>o1&3LhH-_k`zT~KKL)~CUl zcb8qRqu#*W9-r_#$YobTseBhC5t@Rpi2CKfY%k|Wxi4mKiZW!%y-<0}Y>m!t=sd?| zKRD&jDlB*CN2~vdV2m&ZtuvnB;^4!Km4;%hll$oS)8xAWl}YIOiJ5~Z3Rin4TjZ=^ z!*Jp=dwD`#?rM9~Xuz&%w%-Tdm;^p8iPz|`iuB;cIv*P2nHBr|mJ5Ud-P*pCPtNRD zMs+h=+W1+5uH$K|zqUK8fj4?7EE_tERgMD!O=v;&LN|t>V1#whH>~`oDn%h8R+yH! zQ;si)beLs%?EE(Z?}M9|s~I+5eH*(An0#n*kvgaCR{7;Q-fGw6tf+1RB^}qT;Rhlz zCi*Hp5j4-d%Ecsz2jkRWE<6SB77;0@1_Y10%R~fPY*9J<7GXT{$c;hCXU`v0e*5qG zl}fAddot2YDYD(Ac`czvn*AsS#$C(I!99P;15F(G-?}~{eWsD>DbRF z>pnw%(y>3}N>?hsYaXkwW805NWHTw7lyYVE)?o~Lc=_fuYr)x}@`Jxk5ISn8jh58> zr)~4ck0tA5bUKAbcMZx}`U4omaw7IUp47=Hv%xtjNo8MY`J)Oz!gP&{-VyGC*$n&i zB=^Uu?B}`u^+Tkh0qXGkn_o#yOH(J^e@~+`JrO>r9JqUg2i}a*QABCbCy|RA&7q0l zvq}&pA#Ro7gDp&Kf&FSYc^D|C38(Cc>~+~iZ#8o9{^d~G3&D*<22ST+6B?Q>4KNTH zQ*5wuz2+WAA8im3Sg4W(4g;551_Vz)Gwjml>W_$7*Z2_2*^oot!Vlf*EmQL$hU+CN zBx_^>KMXDVE+~)frK`PBLrO46_SFX1dL8#!qd%X+b}o ztfc$PeYH-j$e9zyf*XkdEmO{%q3&;66Y@)-En=#=A|C$lNGTDCHxFXGKKvzx;Aw zFkPtw!vH`1Mi#fktoM&i{hOawA%dsTM65i~3N7X+z09XyyKmEh5sP_LE#g1pj{Lu% zV_GN#Ts5c8nV7WAAD5EgyQVDRQB?XP4X}IV_xNDuW**fHMGU6a@qwwTb$#0q*TOdo zxLnowe_pPE`1eJGurEIE`Y(z6!U&-arzVE%G4%dOewsS2EM?HKDH&CNYU2=*L#vQ2 z?$dR3(Vym@h~y|Nw?aiF#-DDPKt)f``;o@t|McL2r9JR{<>Ss+!Ui)W3!!q)g7WVy za3beRYbJ9-LP!zMyVvKDzRE~ZtGWbgP6ge>#6(Ho^{e=vfE`|&8mG6B;Fq{;UiS#DcME?TE9IBPe8dE;Q~up zgxcFJU`!OG%lY&JY*GEv>pR)d8tH|%nKFPDL5HdnvJ`1^Jw(z^EJRMtCypAGG*eOv zD#3s`*b-W!?uiKr&mMBFmx0J@k<`ZO+JMDl!#FQ*zK-Esu3M^GwCv{;7zS^G70CY8 zCF591fucz$Y&}kIeST6_RCIK>FIU3~S|=U^;JtaK%VYZAK6AS?3{LOE$5>`ymlJ-1 zIJRjxXGiq6;g+0*M(CIf12Isfco!^4_4seg%N7mj3LW3}**NR0x`)X{*c_#sQMAz2 zE=GqMB}@+(oXGAhG!rAVso&n-F6r@67f~n1D*+5}7eSojfxR;B_6#yc9M0h~m}@jseS z)&T@BJm4m}m{>O!xozoM$?a5Q72Vt_>YqYATx(}%XGuoQcD+@I*(QKc?FDhRLMOE&FO@nLZj+A;z4%6TPP^x& z=Sfcchp$hyZG6g&Ni7R>xbQ$8Q>l|$IV)(argnP~uAa}AiFz}Rh^Co19ZRu>m0LEl zs^<|4&N|xd)*Kd-tbHC-I<0}Lg^<z;KWB=yzKW$1V<3}!d&Y?R|ZbwdhtyPpS1J@}DWz=h$*)2^Xj(T9wNX&O5|f|ywn zt+mdDX(KP|OK}+)`u!%hsp1z&PUG5m`3T!jL#Sz$S>f$6D*f)%Tk!7}y1swzLI6yL 
zriISb^O}YIyU$RdH;lzjVP|12bWC|Kji)@vF9>gf#yh~*crvzt!rV03aE3BpDw(%Y zCvPnjhS%ShtP>`1#twbc`jK>3-xhvxKJ16a@*6O-*E!hsJ zthjB0ghO{P8K)mtQ_2F*N{;x$T2UkRHyXgg<5DZbBPTbuOmk$J=M3$51=*_sJ=9IL3c z*$XUCD{~RB3}%7?0l?T&kYxLni*i2>qs`tHoxP^NsPdgme(_BcT?s9ECKrMLe0nnR zTW&WU^WKbnNM}h@2p*2zo)rsli+Z!DxdY_McuDpN_OOGC4B&Vgs)@zY;~pHaraJaT zmw)$O@(^oV)G)HGc75c4zJ1_O+Zj8I?>pcy3U;<1%)-#Ix)P^>&jXOY8$}Vjg}5oH z41tz|#cC5AhQ*VtQZ5ObgXK2wqk_$)`p0cBkrRIelg-Y1!PD~8Dv#=p(#!6)bf3w2 z!BIG)aOOAK^@ZP0tB^uPu}Qk_SKgJ3FCn$qO*)E`Ev3OS-^f*uK07EyhotIfklfe={DKgZ{&~ZG;;;@5P2T zxf{?6^nx}6O2 zNLC2M2p#<%URrto2O8sDekrX4qFs0jrHC*a^n83$Jb1-@<*wzn-BJ)42iFd&86&Q3 zcXMpuD6zU`Ra~XH>?|2i@hh7V#cuL54w-L{-Yi~LmOEJs;94CZLvz34z8Iz>%DyO| z-J2sNMrB6g>hordL&u@|h*7>DPJlE66&*I7Sk03`QlYX9;G)`eI!BFT7++>FLsjId z%S2rI-dW9Qotu(kO~H9V1${ZaKw0=Az#4GmH7VWXoiDn>=CHLvcv9=#jzyb?9)mN< z)_zn(>ROD>k*g6D-gYDHj6#@L>D^~@^C!12#SROG)1n>3cltVAwI8AcBbcplRgwjy zLn&dCn1mTXDpzd_?Efr|lR1ItE#s??zkDbC23t4TSC@MkNUv!Dz3KjySuelwn3ZE` z{TD-$%x)Q=IKfgD&2id-Lk2r+ip!xM&x9JX@Aj}&G~WB?pnE_7qM%@=mkU%BO=e`v5;P$tR-r(G5y)+yw|glGf0r?BPTS6@hVUdLLTzZos=RMFkDGu zl^Ws^^HN-OcJW10uuZAMYk`l$d-p>y%_j&jCw4;3mQ(&59~l=HM^$`q|DU5mU`sdN zF^q;7Afs{!r)Sgr0LFrZ)KqP#M)H7Qmnz{v?GI$ID)6$iNL;zIFGA7&$2^uIbQd7n z>2n9FebKLwT$fB!w6^9*2U0d8C#Qk@2?QIqIk^&P$hEiL0>2>iq;#!p!^6mWBc?I` zWxc&XxJ1}h6_=k^N{~358O1)1%AYoVFi)_Wwb0x5Sbtiyid X[, 1])) * ((0.75 <= X[, 1]) & (1 > X[, 1])) * (7.5)) noise_sd <- sd(f_XW) / snr y <- f_XW + rnorm(n, 0, 1) * noise_sd - -# Split data into test and train sets -test_set_pct <- 0.2 -n_test <- round(test_set_pct * n) -n_train <- n - n_test -test_inds <- sort(sample(1:n, n_test, replace = FALSE)) -train_inds <- (1:n)[!((1:n) %in% test_inds)] -X_test <- as.data.frame(X[test_inds, ]) -X_train <- as.data.frame(X[train_inds, ]) -y_test <- y[test_inds] -y_train <- y[train_inds] ``` ## Python @@ -161,8 +149,31 @@ f_XW = ( ) noise_sd = np.std(f_XW) / snr y = f_XW + rng.normal(0, noise_sd, n) +``` + +:::: + +Split the data into train and test sets -# Split data into test and train 
sets +::::{.panel-tabset group="language"} + +## R + +```{r} +test_set_pct <- 0.2 +n_test <- round(test_set_pct * n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds, ]) +X_train <- as.data.frame(X[train_inds, ]) +y_test <- y[test_inds] +y_train <- y[train_inds] +``` + +## Python + +```{python} test_set_pct = 0.2 X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=test_set_pct, random_state=random_seed @@ -173,7 +184,9 @@ X_train, X_test, y_train, y_test = train_test_split( ## Sampling and Analysis -We sample from a BART model of $y \mid X$ with 10 grow-from-root samples (@he2023stochastic) followed by 100 MCMC samples (this is the default in `stochtree`). We also specify $m = 100$ and we let both $\sigma^2$ and $\sigma^2_{\mu}$ be updated by Gibbs samplers. +We sample from a BART model of $y \mid X$ with 10 grow-from-root GFR samples (@he2023stochastic) followed by 100 MCMC samples (this is the default in `stochtree`), run for 4 chains initialized by different GFR iterations. + +We also specify $m = 100$ trees and we let both $\sigma^2$ and $\sigma^2_{\mu}$ be updated by Gibbs samplers. 
::::{.panel-tabset group="language"} @@ -183,7 +196,12 @@ We sample from a BART model of $y \mid X$ with 10 grow-from-root samples (@he202 num_gfr <- 10 num_burnin <- 0 num_mcmc <- 100 -general_params <- list(sample_sigma2_global = T) +general_params <- list( + sample_sigma2_global = T, + num_threads = 1, + num_chains = 4, + random_seed = random_seed +) mean_forest_params <- list(sample_sigma2_leaf = T, num_trees = 100) bart_model <- stochtree::bart( X_train = X_train, @@ -203,19 +221,100 @@ bart_model <- stochtree::bart( num_gfr = 10 num_burnin = 0 num_mcmc = 100 -general_params = {'sample_sigma2_global': True} -mean_forest_params = {'sample_sigma2_leaf': True, 'num_trees': 100} +general_params = { + "sample_sigma2_global": True, + "num_threads": 1, + "num_chains": 4, + "random_seed": random_seed, +} +mean_forest_params = {"sample_sigma2_leaf": True, "num_trees": 100} bart_model = BARTModel() bart_model.sample( - X_train = X_train, - y_train = y_train, - X_test = X_test, - num_gfr = num_gfr, - num_burnin = num_burnin, - num_mcmc = num_mcmc, - general_params = general_params, - mean_forest_params = mean_forest_params + X_train=X_train, + y_train=y_train, + X_test=X_test, + num_gfr=num_gfr, + num_burnin=num_burnin, + num_mcmc=num_mcmc, + general_params=general_params, + mean_forest_params=mean_forest_params, +) +``` + +:::: + +Plot the mean outcome predictions versus the true outcomes + +::::{.panel-tabset group="language"} + +## R + +```{r} +y_hat_test <- predict( + bart_model, + X = X_test, + terms = "y_hat", + type = "mean" +) +plot( + y_hat_test, + y_test, + xlab = "predicted", + ylab = "actual", + main = "Outcome" ) +abline(0, 1, col = "red", lty = 3, lwd = 3) +``` + +## Python + +```{python} +y_hat_test = bart_model.predict(X=X_test, terms="y_hat", type="mean") +lo, hi = min(y_hat_test.min(), y_test.min()), max(y_hat_test.max(), y_test.max()) +plt.scatter(y_hat_test, y_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) 
+plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Outcome") +plt.show() +``` + +:::: + +Plot the $\sigma^2$ traceplot + +::::{.panel-tabset group="language"} + +## R + +```{r} +sigma_observed <- var(y - f_XW) +sigma2_global_samples <- extractParameter(bart_model, "sigma2_global") +plot_bounds <- c( + min(c(sigma2_global_samples, sigma_observed)), + max(c(sigma2_global_samples, sigma_observed)) +) +plot( + sigma2_global_samples, + ylim = plot_bounds, + ylab = "sigma^2", + xlab = "Sample", + main = "Global variance parameter" +) +abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") +``` + +## Python + +```{python} +sigma_observed = np.var(y - f_XW) +global_var_samples = bart_model.extract_parameter("sigma2_global") +plt.plot(global_var_samples) +plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) +plt.xlabel("Sample") +plt.ylabel(r"$\sigma^2$") +plt.title("Global variance parameter") +plt.show() ``` :::: diff --git a/vignettes/bcf.qmd b/vignettes/bcf.qmd index c392ab40e..2920bc192 100644 --- a/vignettes/bcf.qmd +++ b/vignettes/bcf.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) @@ -30,6 +30,8 @@ in $\mu(\cdot)$ to reduce confounding bias. # Setup +Load necessary packages + ::::{.panel-tabset group="language"} ## R @@ -50,8 +52,28 @@ from stochtree import BCFModel :::: +Set a seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed <- 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +:::: + We also define several simple functions that configure the data generating processes -used in this vignette. 
+used in this vignette ::::{.panel-tabset group="language"} @@ -78,30 +100,31 @@ tau2 <- function(x) { ## Python ```{python} -# Note: Python uses 0-based column indexing (R's X[,1] = Python's X[:,0]) def g(x): return np.where(x[:, 4] == 1, 2, np.where(x[:, 4] == 2, -1, -4)) + def mu1(x): return 1 + g(x) + x[:, 0] * x[:, 2] + def mu2(x): return 1 + g(x) + 6 * np.abs(x[:, 2] - 1) + def tau1(x): return np.full(x.shape[0], 3.0) + def tau2(x): return 1 + 2 * x[:, 1] * x[:, 3] - -rng = np.random.default_rng(101) ``` :::: # Binary Treatment -## Demo 1: Nonlinear Outcome Model, Heterogeneous Treatment Effect +## Demo 1: Linear Outcome Model, Heterogeneous Treatment Effect We consider the following data generating process from @hahn2020bayesian: @@ -109,7 +132,7 @@ We consider the following data generating process from @hahn2020bayesian: \begin{aligned} y &= \mu(X) + \tau(X) Z + \epsilon\\ \epsilon &\sim N\left(0,\sigma^2\right)\\ -\mu(X) &= 1 + g(X) + 6 \lvert X_3 - 1 \rvert\\ +\mu(X) &= 1 + g(X) + 6 X_1 X_3\\ \tau(X) &= 1 + 2 X_2 X_4\\ g(X) &= \mathbb{I}(X_5=1) \times 2 - \mathbb{I}(X_5=2) \times 1 - \mathbb{I}(X_5=3) \times 4\\ s_{\mu} &= \sqrt{\mathbb{V}(\mu(X))}\\ @@ -124,14 +147,14 @@ Z &\sim \text{Bernoulli}\left(\pi(X)\right) ### Simulation -We draw from the DGP defined above. 
+We generate data from the DGP defined above ::::{.panel-tabset group="language"} ## R ```{r} -n <- 500 +n <- 1000 snr <- 3 x1 <- rnorm(n) x2 <- rnorm(n) @@ -149,8 +172,43 @@ y <- E_XZ + rnorm(n, 0, 1) * (sd(E_XZ) / snr) X <- as.data.frame(X) X$x4 <- factor(X$x4, ordered = TRUE) X$x5 <- factor(X$x5, ordered = TRUE) +``` -# Split data into test and train sets +## Python + +```{python} +n = 1000 +snr = 3 +x1 = rng.normal(size=n) +x2 = rng.normal(size=n) +x3 = rng.normal(size=n) +x4 = rng.binomial(1, 0.5, n).astype(float) +x5 = rng.choice([1, 2, 3], size=n).astype(float) +X = np.column_stack([x1, x2, x3, x4, x5]) +mu_x = mu1(X) +tau_x = tau2(X) +pi_x = ( + 0.8 * norm.cdf((3 * mu_x / np.std(mu_x)) - 0.5 * X[:, 0]) + + 0.05 + + rng.uniform(size=n) / 10 +) +Z = rng.binomial(1, pi_x, n).astype(float) +E_XZ = mu_x + Z * tau_x +y = E_XZ + rng.normal(size=n) * (np.std(E_XZ) / snr) +X_df = pd.DataFrame({"x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5}) +X_df["x4"] = pd.Categorical(X_df["x4"].astype(int), categories=[0, 1], ordered=True) +X_df["x5"] = pd.Categorical(X_df["x5"].astype(int), categories=[1, 2, 3], ordered=True) +``` + +:::: + +Split data into test and train sets + +::::{.panel-tabset group="language"} + +## R + +```{r} test_set_pct <- 0.2 n_test <- round(test_set_pct * n) n_train <- n - n_test @@ -173,54 +231,37 @@ tau_train <- tau_x[train_inds] ## Python ```{python} -n = 500 -snr = 3 -x1 = rng.normal(size=n) -x2 = rng.normal(size=n) -x3 = rng.normal(size=n) -x4 = rng.binomial(1, 0.5, n).astype(float) -x5 = rng.choice([1, 2, 3], size=n).astype(float) -X = np.column_stack([x1, x2, x3, x4, x5]) -mu_x = mu1(X) -tau_x = tau2(X) -pi_x = (0.8 * norm.cdf((3 * mu_x / np.std(mu_x)) - 0.5 * X[:, 0]) - + 0.05 + rng.uniform(size=n) / 10) -Z = rng.binomial(1, pi_x, n).astype(float) -E_XZ = mu_x + Z * tau_x -y = E_XZ + rng.normal(size=n) * (np.std(E_XZ) / snr) - -# Convert to DataFrame with ordered categoricals (matching R's factor(..., ordered=TRUE)) -X_df = 
pd.DataFrame({"x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5}) -X_df["x4"] = pd.Categorical(X_df["x4"].astype(int), categories=[0, 1], ordered=True) -X_df["x5"] = pd.Categorical(X_df["x5"].astype(int), categories=[1, 2, 3], ordered=True) - -# Split data into test and train sets test_set_pct = 0.2 n_test = round(test_set_pct * n) n_train = n - n_test test_inds = rng.choice(n, n_test, replace=False) train_inds = np.setdiff1d(np.arange(n), test_inds) -X_test = X_df.iloc[test_inds] +X_test = X_df.iloc[test_inds] X_train = X_df.iloc[train_inds] -pi_test, pi_train = pi_x[test_inds], pi_x[train_inds] -Z_test, Z_train = Z[test_inds], Z[train_inds] -y_test, y_train = y[test_inds], y[train_inds] -mu_test, mu_train = mu_x[test_inds], mu_x[train_inds] +pi_test, pi_train = pi_x[test_inds], pi_x[train_inds] +Z_test, Z_train = Z[test_inds], Z[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +mu_test, mu_train = mu_x[test_inds], mu_x[train_inds] tau_test, tau_train = tau_x[test_inds], tau_x[train_inds] ``` :::: + ### Sampling and Analysis -We simulate from a BCF model using "warm-start" samples fit with the grow-from-root algorithm (@he2023stochastic, @krantsevich2023stochastic). This is the default in `stochtree`. +We simulate from a BCF model initialized by "warm-start" samples fit with the grow-from-root algorithm (@he2023stochastic, @krantsevich2023stochastic). This is the default in `stochtree`. 
::::{.panel-tabset group="language"} ## R ```{r} -general_params <- list(num_threads=1) +general_params <- list( + num_threads=1, + num_chains=4, + random_seed=random_seed +) bcf_model <- bcf( X_train = X_train, Z_train = Z_train, @@ -228,6 +269,9 @@ bcf_model <- bcf( propensity_train = pi_train, X_test = X_test, Z_test = Z_test, + num_gfr = 10, + num_burnin = 1000, + num_mcmc = 100, propensity_test = pi_test, general_params = general_params ) @@ -236,30 +280,39 @@ bcf_model <- bcf( ## Python ```{python} +general_params = {"num_threads": 1, "num_chains": 4, "random_seed": random_seed} bcf_model = BCFModel() bcf_model.sample( - X_train=X_train, - Z_train=Z_train, - y_train=y_train, + X_train=X_train, + Z_train=Z_train, + y_train=y_train, propensity_train=pi_train, - X_test=X_test, - Z_test=Z_test, - propensity_test=pi_test, - general_params={"num_threads": 1}, + X_test=X_test, + Z_test=Z_test, + num_gfr=10, + num_burnin=1000, + num_mcmc=100, + propensity_test=pi_test, + general_params=general_params, ) ``` :::: -Inspect the samples initialized with an XBART warm-start. 
+Plot the true versus estimated prognostic function ::::{.panel-tabset group="language"} ## R ```{r} -mu_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "prognostic_function") -tau_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "cate") +mu_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "prognostic_function" +) plot( rowMeans(mu_hat_test), mu_test, @@ -268,6 +321,40 @@ plot( main = "Prognostic function" ) abline(0, 1, col = "red", lty = 3, lwd = 3) +``` + +## Python + +```{python} +mu_hat_test = bcf_model.predict( + X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function" +) +mu_pred = mu_hat_test.mean(axis=1) +lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) +plt.scatter(mu_pred, mu_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Prognostic function") +plt.show() +``` + +:::: + +Plot the true versus estimated CATE function + +::::{.panel-tabset group="language"} + +## R + +```{r} +tau_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "cate" +) plot( rowMeans(tau_hat_test), tau_test, @@ -276,6 +363,31 @@ plot( main = "Treatment effect" ) abline(0, 1, col = "red", lty = 3, lwd = 3) +``` + +## Python + +```{python} +tau_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="cate") +tau_pred = tau_hat_test.mean(axis=1) +lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) +plt.scatter(tau_pred, tau_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Treatment effect") +plt.show() +``` + +:::: + +Plot the $\sigma^2$ traceplot + +::::{.panel-tabset group="language"} + +## R + +```{r} sigma_observed <- var(y - E_XZ) 
sigma2_global_samples <- extractParameter(bcf_model, "sigma2_global") plot_bounds <- c( @@ -295,33 +407,19 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python ```{python} -mu_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function") sigma_observed = np.var(y - E_XZ) -mu_pred = mu_hat_test.mean(axis=1) -lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) -plt.scatter(mu_pred, mu_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") -plt.show() - -tau_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="cate") -tau_pred = tau_hat_test.mean(axis=1) -lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) -plt.scatter(tau_pred, tau_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") -plt.show() - global_var_samples = bcf_model.extract_parameter("sigma2_global") plt.plot(global_var_samples) plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) -plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") +plt.xlabel("Sample") +plt.ylabel(r"$\sigma^2$") +plt.title("Global variance parameter") plt.show() ``` :::: -Examine test set interval coverage. +Examine test set interval coverage of $\tau(X)$. 
::::{.panel-tabset group="language"} @@ -332,7 +430,7 @@ test_lb <- apply(tau_hat_test, 1, quantile, 0.025) test_ub <- apply(tau_hat_test, 1, quantile, 0.975) cover <- ((test_lb <= tau_x[test_inds]) & (test_ub >= tau_x[test_inds])) -mean(cover) +cat("CATE function interval coverage: ", mean(cover) * 100, "%\n") ``` ## Python @@ -341,12 +439,12 @@ mean(cover) test_lb = np.quantile(tau_hat_test, 0.025, axis=1) test_ub = np.quantile(tau_hat_test, 0.975, axis=1) cover = (test_lb <= tau_test) & (test_ub >= tau_test) -print(f"Coverage: {cover.mean():.3f}") +print(f"CATE function interval coverage: {cover.mean() * 100:.2f}%") ``` :::: -## Demo 2: Linear Outcome Model, Heterogeneous Treatment Effect +## Demo 2: Nonlinear Outcome Model, Heterogeneous Treatment Effect We consider the following data generating process from @hahn2020bayesian: @@ -354,7 +452,7 @@ We consider the following data generating process from @hahn2020bayesian: \begin{aligned} y &= \mu(X) + \tau(X) Z + \epsilon\\ \epsilon &\sim N\left(0,\sigma^2\right)\\ -\mu(X) &= 1 + g(X) + 6 X_1 X_3\\ +\mu(X) &= 1 + g(X) + 6 \lvert X_3 - 1 \rvert\\ \tau(X) &= 1 + 2 X_2 X_4\\ g(X) &= \mathbb{I}(X_5=1) \times 2 - \mathbb{I}(X_5=2) \times 1 - \mathbb{I}(X_5=3) \times 4\\ s_{\mu} &= \sqrt{\mathbb{V}(\mu(X))}\\ @@ -369,14 +467,14 @@ Z &\sim \text{Bernoulli}\left(\pi(X)\right) ### Simulation -We draw from the DGP defined above. 
+Generate data from the DGP above ::::{.panel-tabset group="language"} ## R ```{r} -n <- 500 +n <- 1000 snr <- 3 x1 <- rnorm(n) x2 <- rnorm(n) @@ -394,8 +492,43 @@ y <- E_XZ + rnorm(n, 0, 1) * (sd(E_XZ) / snr) X <- as.data.frame(X) X$x4 <- factor(X$x4, ordered = TRUE) X$x5 <- factor(X$x5, ordered = TRUE) +``` + +## Python -# Split data into test and train sets +```{python} +n = 1000 +snr = 3 +x1 = rng.normal(size=n) +x2 = rng.normal(size=n) +x3 = rng.normal(size=n) +x4 = rng.binomial(1, 0.5, n).astype(float) +x5 = rng.choice([1, 2, 3], size=n).astype(float) +X = np.column_stack([x1, x2, x3, x4, x5]) +mu_x = mu2(X) # mu2 for Demo 2 +tau_x = tau2(X) +pi_x = ( + 0.8 * norm.cdf((3 * mu_x / np.std(mu_x)) - 0.5 * X[:, 0]) + + 0.05 + + rng.uniform(size=n) / 10 +) +Z = rng.binomial(1, pi_x, n).astype(float) +E_XZ = mu_x + Z * tau_x +y = E_XZ + rng.normal(size=n) * (np.std(E_XZ) / snr) +X_df = pd.DataFrame({"x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5}) +X_df["x4"] = pd.Categorical(X_df["x4"].astype(int), categories=[0, 1], ordered=True) +X_df["x5"] = pd.Categorical(X_df["x5"].astype(int), categories=[1, 2, 3], ordered=True) +``` + +:::: + +Split into train and test sets + +::::{.panel-tabset group="language"} + +## R + +```{r} test_set_pct <- 0.2 n_test <- round(test_set_pct * n) n_train <- n - n_test @@ -418,37 +551,17 @@ tau_train <- tau_x[train_inds] ## Python ```{python} -n = 500 -snr = 3 -x1 = rng.normal(size=n) -x2 = rng.normal(size=n) -x3 = rng.normal(size=n) -x4 = rng.binomial(1, 0.5, n).astype(float) -x5 = rng.choice([1, 2, 3], size=n).astype(float) -X = np.column_stack([x1, x2, x3, x4, x5]) -mu_x = mu2(X) # mu2 for Demo 2 -tau_x = tau2(X) -pi_x = (0.8 * norm.cdf((3 * mu_x / np.std(mu_x)) - 0.5 * X[:, 0]) - + 0.05 + rng.uniform(size=n) / 10) -Z = rng.binomial(1, pi_x, n).astype(float) -E_XZ = mu_x + Z * tau_x -y = E_XZ + rng.normal(size=n) * (np.std(E_XZ) / snr) - -X_df = pd.DataFrame({"x1": x1, "x2": x2, "x3": x3, "x4": x4, "x5": x5}) -X_df["x4"] = 
pd.Categorical(X_df["x4"].astype(int), categories=[0, 1], ordered=True) -X_df["x5"] = pd.Categorical(X_df["x5"].astype(int), categories=[1, 2, 3], ordered=True) - test_set_pct = 0.2 n_test = round(test_set_pct * n) n_train = n - n_test test_inds = rng.choice(n, n_test, replace=False) train_inds = np.setdiff1d(np.arange(n), test_inds) -X_test = X_df.iloc[test_inds] +X_test = X_df.iloc[test_inds] X_train = X_df.iloc[train_inds] -pi_test, pi_train = pi_x[test_inds], pi_x[train_inds] -Z_test, Z_train = Z[test_inds], Z[train_inds] -y_test, y_train = y[test_inds], y[train_inds] -mu_test, mu_train = mu_x[test_inds], mu_x[train_inds] +pi_test, pi_train = pi_x[test_inds], pi_x[train_inds] +Z_test, Z_train = Z[test_inds], Z[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +mu_test, mu_train = mu_x[test_inds], mu_x[train_inds] tau_test, tau_train = tau_x[test_inds], tau_x[train_inds] ``` @@ -463,7 +576,11 @@ We simulate from a BCF model using default settings. ## R ```{r} -general_params <- list(num_threads = 1) +general_params <- list( + num_threads = 1, + num_chains = 4, + random_seed = random_seed +) bcf_model <- bcf( X_train = X_train, Z_train = Z_train, @@ -472,6 +589,9 @@ bcf_model <- bcf( X_test = X_test, Z_test = Z_test, propensity_test = pi_test, + num_gfr = 10, + num_burnin = 1000, + num_mcmc = 100, general_params = general_params ) ``` @@ -479,28 +599,39 @@ bcf_model <- bcf( ## Python ```{python} +general_params = {"num_threads": 1, "num_chains": 4, "random_seed": random_seed} bcf_model = BCFModel() bcf_model.sample( - X_train=X_train, - Z_train=Z_train, - y_train=y_train, + X_train=X_train, + Z_train=Z_train, + y_train=y_train, propensity_train=pi_train, - X_test=X_test, - Z_test=Z_test, + X_test=X_test, + Z_test=Z_test, propensity_test=pi_test, - general_params={"num_threads": 1}, + num_gfr=10, + num_burnin=1000, + num_mcmc=100, + general_params=general_params, ) ``` :::: +Plot the true versus estimated prognostic function + ::::{.panel-tabset 
group="language"} ## R ```{r} -mu_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "prognostic_function") -tau_hat_test <- predict(bcf_model, X = X_test, Z = Z_test, propensity = pi_test, terms = "cate") +mu_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "prognostic_function" +) plot( rowMeans(mu_hat_test), mu_test, @@ -509,6 +640,40 @@ plot( main = "Prognostic function" ) abline(0, 1, col = "red", lty = 3, lwd = 3) +``` + +## Python + +```{python} +mu_hat_test = bcf_model.predict( + X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function" +) +mu_pred = mu_hat_test.mean(axis=1) +lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) +plt.scatter(mu_pred, mu_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Prognostic function") +plt.show() +``` + +:::: + +Plot the true versus estimated CATE function + +::::{.panel-tabset group="language"} + +## R + +```{r} +tau_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "cate" +) plot( rowMeans(tau_hat_test), tau_test, @@ -517,8 +682,33 @@ plot( main = "Treatment effect" ) abline(0, 1, col = "red", lty = 3, lwd = 3) -sigma2_global_samples <- extractParameter(bcf_model, "sigma2_global") +``` + +## Python + +```{python} +tau_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="cate") +tau_pred = tau_hat_test.mean(axis=1) +lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) +plt.scatter(tau_pred, tau_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Treatment effect") +plt.show() +``` + +:::: + +Plot the $\sigma^2$ traceplot + +::::{.panel-tabset group="language"} + +## R + +```{r} sigma_observed <- var(y - E_XZ) 
+sigma2_global_samples <- extractParameter(bcf_model, "sigma2_global") plot_bounds <- c( min(c(sigma2_global_samples, sigma_observed)), max(c(sigma2_global_samples, sigma_observed)) @@ -536,34 +726,19 @@ abline(h = sigma_observed, lty = 3, lwd = 3, col = "blue") ## Python ```{python} -mu_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function") -tau_hat_test = bcf_model.predict(X=X_test, Z=Z_test, propensity=pi_test, terms="cate") sigma_observed = np.var(y - E_XZ) - -mu_pred = mu_hat_test.mean(axis=1) -lo, hi = min(mu_pred.min(), mu_test.min()), max(mu_pred.max(), mu_test.max()) -plt.scatter(mu_pred, mu_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Prognostic function") -plt.show() - -tau_pred = tau_hat_test.mean(axis=1) -lo, hi = min(tau_pred.min(), tau_test.min()), max(tau_pred.max(), tau_test.max()) -plt.scatter(tau_pred, tau_test, alpha=0.5) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Treatment effect") -plt.show() - -sigma2_global_samples = bcf_model.extract_parameter("sigma2_global") -plt.plot(sigma2_global_samples) +global_var_samples = bcf_model.extract_parameter("sigma2_global") +plt.plot(global_var_samples) plt.axhline(sigma_observed, color="blue", linestyle="dashed", linewidth=2) -plt.xlabel("Sample"); plt.ylabel(r"$\sigma^2$"); plt.title("Global variance parameter") +plt.xlabel("Sample") +plt.ylabel(r"$\sigma^2$") +plt.title("Global variance parameter") plt.show() ``` :::: -Examine test set interval coverage. +Examine test set interval coverage of $\tau(X)$. 
::::{.panel-tabset group="language"} @@ -574,7 +749,7 @@ test_lb <- apply(tau_hat_test, 1, quantile, 0.025) test_ub <- apply(tau_hat_test, 1, quantile, 0.975) cover <- ((test_lb <= tau_x[test_inds]) & (test_ub >= tau_x[test_inds])) -mean(cover) +cat("CATE function interval coverage: ", mean(cover) * 100, "%\n") ``` ## Python @@ -583,7 +758,7 @@ mean(cover) test_lb = np.quantile(tau_hat_test, 0.025, axis=1) test_ub = np.quantile(tau_hat_test, 0.975, axis=1) cover = (test_lb <= tau_test) & (test_ub >= tau_test) -print(f"Coverage: {cover.mean():.3f}") +print(f"CATE function interval coverage: {cover.mean() * 100:.2f}%") ``` :::: diff --git a/vignettes/custom-sampling.qmd b/vignettes/custom-sampling.qmd index 2dc5c3b8b..37aacba48 100644 --- a/vignettes/custom-sampling.qmd +++ b/vignettes/custom-sampling.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) @@ -26,25 +26,49 @@ C++. 
# Motivation To illustrate when such a prototype interface might be useful, consider the classic -BART algorithm: - -| **INPUT**: $y$, $X$, $\tau$, $\nu$, $\lambda$, $\alpha$, $\beta$ -| **OUTPUT**: $m$ samples of a decision forest with $k$ trees and global variance parameter $\sigma^2$ -| Initialize $\sigma^2$ via a default or a data-dependent calibration exercise -| Initialize "forest 0" with $k$ trees with a single root node, referring to tree $j$'s prediction vector as $f_{0,j}$ -| Compute residual as $r = y - \sum_{j=1}^k f_{0,j}$ -| **FOR** $i$ **IN** $\left\{1,\dots,m\right\}$: -| Initialize forest $i$ from forest $i-1$ -| **FOR** $j$ **IN** $\left\{1,\dots,k\right\}$: -| Add predictions for tree $j$ to residual: $r = r + f_{i,j}$ -| Sample tree $j$ of forest $i$ from $p\left(\mathcal{T}_{i,j} \mid r, \sigma^2\right)$ -| Update residual by removing tree $j$'s predictions: $r = r - f_{i,j}$ -| Sample $\sigma^2$ from $p\left(\sigma^2 \mid r\right)$ -| **RETURN** forests $\left\{1,\dots,m\right\}$ and $\sigma^2$ samples - -This algorithm is implemented in the `bart()` function, but the low-level interface -allows you to customize this loop — for example, to add random effects, modify the -variance model, or implement a novel sampling scheme. 
+BART algorithm + + + +::: {.algorithm style="border-top: 2px solid; border-bottom: 2px solid; padding: 0.6em 1em; margin: 1.5em 0;"} + +Input: $y$, $X$, $\tau$, $\nu$, $\lambda$, $\alpha$, $\beta$ + +Output: $mc$ samples of a decision forest with $m$ trees and global variance parameter $\sigma^2$ + +Initialize $\sigma^2$ via a default or a data-dependent calibration exercise + +Initialize a forest with $m$ trees with a single root node, referring to tree $j$'s prediction vector as $f_{j}$ + +Compute residual as $r = y - \sum_{j=1}^k f_{j}$ + +For $i$ in $\left\{1,\dots,mc\right\}$: + +::::: {style="margin-left: 2em"} +For $j$ in $\left\{1,\dots,m\right\}$: + +::::: {style="margin-left: 2em"} +Add predictions for tree $j$ to residual: $r = r + f_{j}$ + +Sample tree $j$ of forest $i$ from $p\left(\mathcal{T}_{i,j} \mid r, \sigma^2\right)$ + +Sample tree $j$'s leaf parameters from $p\left(\theta_{i,j} \mid \mathcal{T}_{i,j}, r, \sigma^2\right)$ and update $f_j$ accordingly + +Update residual by removing tree $j$'s predictions: $r = r - f_{j}$ + +::::: + +Sample $\sigma^2$ from $p\left(\sigma^2 \mid r\right)$ + +::::: + +Return each of the forests and $\sigma^2$ draws + +::: + +This algorithm is implemented in `stochtree` via the `bart()` R function or the `BARTModel` python class, but the low-level interface allows you to customize this loop. + +In this vignette, we will demonstrate how to use this interface to fit a modified BART model in which the global error variance is modeled as $t$-distributed rather than Gaussian. 
# Setup @@ -70,426 +94,888 @@ from stochtree import ( :::: -# Supervised Learning Demo +Set seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed <- 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +:::: + +# Data Generation and Preparation + +Consider a modified version of the "Friedman dataset" (@friedman1991multivariate) with heavy-tailed errors + +$$ +\begin{aligned} +Y_i \mid X_i = x_i &\overset{\text{iid}}{\sim} t_{\nu}\left(f(x_i), \sigma^2\right),\\ +f(x) &= 10 \sin \left(\pi x_1 x_2\right) + 20 (x_3 - 1/2)^2 + 10 x_4 + 5 x_5,\\ +X_1, \dots, X_p &\overset{\text{iid}}{\sim} \text{U}\left(0,1\right), +\end{aligned} +$$ -## Simulation +where $t_{\nu}(\mu,\sigma^2)$ represented a generalized $t$ distribution with location $\mu$, scale $\sigma^2$ and $\nu$ degrees of freedom. -Simulate a simple partitioned linear model. +We simulate from this dataset below ::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data -n <- 500 -p_X <- 10 -p_W <- 1 -X <- matrix(runif(n*p_X), ncol = p_X) -W <- matrix(runif(n*p_W), ncol = p_W) -f_XW <- ( - ((0 <= X[,1]) & (0.25 > X[,1])) * (-3*W[,1]) + - ((0.25 <= X[,1]) & (0.5 > X[,1])) * (-1*W[,1]) + - ((0.5 <= X[,1]) & (0.75 > X[,1])) * (1*W[,1]) + - ((0.75 <= X[,1]) & (1 > X[,1])) * (3*W[,1]) +n <- 1000 +p <- 20 +X <- matrix(runif(n * p), ncol = p) +m_x <- (10 * + sin(pi * X[, 1] * X[, 2]) + + 20 * (X[, 3] - 0.5)^2 + + 10 * X[, 4] + + 5 * X[, 5]) +sigma2 <- 9 +nu <- 2 +eps <- rt(n, df = nu) * sqrt(sigma2) +y <- m_x + eps +sigma2_true <- var(eps) +``` + +## Python + +```{python} +n = 1000 +p = 20 +X = rng.uniform(low=0.0, high=1.0, size=(n, p)) +m_x = ( + 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + + 20 * np.power(X[:, 2] - 0.5, 2.0) + + 10 * X[:, 3] + + 5 * X[:, 4] ) -y <- f_XW + rnorm(n, 0, 1) +sigma2 = 9 +nu = 2 +eps = rng.standard_t(df=nu, size=n) * np.sqrt(sigma2) +y = m_x + eps +sigma2_true = 
np.var(eps) +``` + +:::: + +And we pre-standardize the outcome + +::::{.panel-tabset group="language"} + +## R -# Standardize outcome +```{r} y_bar <- mean(y) y_std <- sd(y) -resid <- (y-y_bar)/y_std +y_standardized <- (y - y_bar) / y_std ``` ## Python ```{python} -random_seed = 1234 -rng = np.random.default_rng(random_seed) - -n = 500 -p_X = 10 -p_W = 1 -X = rng.uniform(size=(n, p_X)) -W = rng.uniform(size=(n, p_W)) -# R uses X[,1] (1-indexed) = Python X[:,0] -f_XW = ( - ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * (-3 * W[:, 0]) + - ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (-1 * W[:, 0]) + - ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * ( 1 * W[:, 0]) + - ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * ( 3 * W[:, 0]) -) -y = f_XW + rng.standard_normal(n) - -# Standardize outcome y_bar = np.mean(y) y_std = np.std(y) -resid = (y - y_bar) / y_std +y_standardized = (y - y_bar) / y_std ``` :::: -## Sampling - -Set some parameters that inform the forest and variance parameter samplers. +# Sampling + +We can obtain $t$-distributed errors by augmenting the basic BART model with a further prior on the individual variances: +$$ +\begin{aligned} +Y_i \mid (X_i = x_i) &\overset{\text{iid}}{\sim} \mathrm{N}(f(x_i), \phi_i),\\ +\phi_i &\overset{\text{iid}}{\sim} \text{IG}\left(\frac{\nu}{2}, \frac{\nu\sigma^2}{2}\right),\\ +f &\sim \mathrm{BART}(\alpha,\beta,m). +\end{aligned} +$$ +Any Gamma prior on $\sigma^2$ ensures conditional conjugacy, though for simplicity's sake we use a log-uniform prior $\sigma^2\propto 1 / \sigma^2$. In the implementation below, we sample from a "parameter-expanded" variant of this model discussed in Section 12.1 of @gelman2013bayesian, which possesses favorable convergence properties. 
+$$ +\begin{aligned} +Y_i \mid (X_i = x_i) &\overset{\text{iid}}{\sim} \mathrm{N}(f(x_i), a^2\phi_i),\\ +\phi_i &\overset{\text{iid}}{\sim} \text{IG}\left(\frac{\nu}{2}, \frac{\nu\tau^2}{2}\right),\\ +a^2 &\propto 1/a^2,\\ +\tau^2 &\propto 1/\tau^2,\\ +f &\sim \mathrm{BART}(\alpha,\beta,m). +\end{aligned} +$$ + +## Helper functions + +We define several helper functions for Gibbs draws of each of the above parameters. ::::{.panel-tabset group="language"} ## R ```{r} -alpha <- 0.9 -beta <- 1.25 -min_samples_leaf <- 1 -max_depth <- 10 -num_trees <- 100 -cutpoint_grid_size <- 100 -global_variance_init <- 1. -current_sigma2 <- global_variance_init -tau_init <- 1/num_trees -leaf_prior_scale <- as.matrix(ifelse(p_W >= 1, diag(tau_init, p_W), diag(tau_init, 1))) -nu <- 4 -lambda <- 0.5 -a_leaf <- 2. -b_leaf <- 0.5 -leaf_regression <- T -feature_types <- as.integer(rep(0, p_X)) # 0 = numeric -var_weights <- rep(1/p_X, p_X) +# Sample observation-specific variance parameters phi_i +sample_phi_i <- function(y, dataset, forest, a2, tau2, nu) { + n <- length(y) + yhat_forest <- forest$predict(dataset) + res <- y - yhat_forest + posterior_shape <- (nu + 1) / 2 + posterior_scale <- (nu * tau2 + (res * res / a2)) / 2 + return(1 / rgamma(n, posterior_shape, rate = posterior_scale)) +} + +# Sample variance parameter a^2 +sample_a2 <- function(y, dataset, forest, phi_i) { + n <- length(y) + yhat_forest <- forest$predict(dataset) + res <- y - yhat_forest + posterior_shape <- n / 2 + posterior_scale <- (1 / 2) * sum(res * res / phi_i) + return(1 / rgamma(1, posterior_shape, rate = posterior_scale)) +} + +# Sample variance parameter tau^2 +sample_tau2 <- function(phi_i, nu) { + n <- length(phi_i) + posterior_shape <- nu * n / 2 + posterior_scale <- (nu / 2) * sum(1 / phi_i) + return(1 / rgamma(1, posterior_shape, rate = posterior_scale)) +} ``` ## Python ```{python} -alpha = 0.9 -beta = 1.25 -min_samples_leaf = 1 -max_depth = 10 -num_trees = 100 -cutpoint_grid_size = 100 
-global_variance_init = 1.0 -tau_init = 1.0 / num_trees -leaf_prior_scale = np.array([[tau_init]], order="C") -a_global = 4.0 -b_global = 2.0 -a_leaf = 2.0 -b_leaf = 0.5 -leaf_regression = True -feature_types = np.zeros(p_X, dtype=int) # 0 = numeric -var_weights = np.full(p_X, 1.0 / p_X) -leaf_model = 1 if leaf_regression else 0 # 1 = linear leaf (with basis) -leaf_dimension = p_W if leaf_regression else 1 +def sample_phi_i( + y: np.array, + dataset: Dataset, + forest: Forest, + a2: float, + tau2: float, + nu: float, + rng: np.random.Generator, +) -> np.array: + """ + Sample observation-specific variance parameters phi_i + """ + n = len(y) + yhat_forest = forest.predict(dataset) + res = y - yhat_forest + posterior_shape = (nu + 1) / 2 + posterior_scale = (nu * tau2 + (res * res / a2)) / 2 + return 1 / rng.gamma(shape=posterior_shape, scale=1 / posterior_scale, size=n) + + +def sample_a2( + y: np.array, + dataset: Dataset, + forest: Forest, + phi_i: np.array, + rng: np.random.Generator, +) -> float: + """ + Sample variance parameter a^2 + """ + n = len(y) + yhat_forest = forest.predict(dataset) + res = y - yhat_forest + posterior_shape = n / 2 + posterior_scale = (1 / 2) * np.sum(res * res / phi_i) + return 1 / rng.gamma(shape=posterior_shape, scale=1 / posterior_scale, size=1)[0] + + +def sample_tau2(phi_i: np.array, nu: float, rng: np.random.Generator) -> float: + """ + Sample variance parameter tau^2 + """ + n = len(phi_i) + posterior_shape = nu * n / 2 + posterior_scale = (nu / 2) * np.sum(1 / phi_i) + return 1 / rng.gamma(shape=posterior_shape, scale=1 / posterior_scale, size=1)[0] ``` :::: -Initialize R-level access to the C++ classes needed to sample our model. +## Sampling data structures + +The underlying C++ codebase centers around a handful of objects and their interactions. We provide R and Python wrappers for these objects to enable greater customization of stochastic tree samplers than can be furnished by the high-level BART and BCF interfaces. 
+ +A "Forest Dataset" class manages covariates, bases, and variance weights used in a forest model, and contains methods for updating the underlying data as well as querying numeric attributes of the data (e.g. `num_observations`, `num_covariates`, `has_basis`, etc...). An Outcome / Residual class wraps the model outcome, which is updated in-place during sampling to reflect the full, or partial, residual net of mean forest or random effects predictions. A "Forest Samples" class is a container of sampled tree ensembles, essentially a very thin wrapper around a C++ `std::vector` of `std::unique_ptr` to `Ensemble` objects. A Forest class is a thin wrapper around `Ensemble` C++ objects, which is used as the "active forest" or "state" of the forest model during sampling. A "Forest Model" class maintains all of the "temporary" data structures used to sample a forest, and its `sample_one_iteration()` method performs one iteration of the requested forest sampling algorithm (i.e. Metropolis-Hastings or Grow-From-Root). Two different configuration objects (global and forest-specific) manage the parameters needed to run the samplers. + +Writing a custom Gibbs sampler with one or more stochastic forest terms requires initializing each of these objects and then deploying them in a sampling loop. + +First, we initialize the data objects with covariates and standardized outcomes ::::{.panel-tabset group="language"} ## R ```{r} -# Data -if (leaf_regression) { - forest_dataset <- createForestDataset(X, W) - outcome_model_type <- 1 - leaf_dimension <- p_W -} else { - forest_dataset <- createForestDataset(X) - outcome_model_type <- 0 - leaf_dimension <- 1 -} -outcome <- createOutcome(resid)
+phi_i_init <- rep(1., n) + +# Initialize data objects +forest_dataset <- createForestDataset(X, variance_weights = 1 / phi_i_init) +outcome <- createOutcome(y_standardized) +``` -# Random number generator (std::mt19937) -rng <- createCppRNG() +## Python -# Sampling data structures -forest_model_config <- createForestModelConfig( - feature_types = feature_types, num_trees = num_trees, num_features = p_X, - num_observations = n, variable_weights = var_weights, leaf_dimension = leaf_dimension, - alpha = alpha, beta = beta, min_samples_leaf = min_samples_leaf, max_depth = max_depth, - leaf_model_type = outcome_model_type, leaf_model_scale = leaf_prior_scale, - cutpoint_grid_size = cutpoint_grid_size -) -global_model_config <- createGlobalModelConfig(global_error_variance = global_variance_init) -forest_model <- createForestModel(forest_dataset, forest_model_config, global_model_config) +```{python} +# Initial values of robust model parameters +tau2_init = 1.0 +a2_init = 1.0 +sigma2_init = tau2_init * a2_init +phi_i_init = np.repeat(1.0, n) + +# Initialize data objects +forest_dataset = Dataset() +forest_dataset.add_covariates(X) +forest_dataset.add_variance_weights(1.0 / phi_i_init) +residual = Residual(y_standardized) +``` -# "Active forest" (which gets updated by the sample) and -# container of forest samples (which is written to when -# a sample is not discarded due to burn-in / thinning) -if (leaf_regression) { - forest_samples <- createForestSamples(num_trees, 1, F) - active_forest <- createForest(num_trees, 1, F) -} else { - forest_samples <- createForestSamples(num_trees, 1, T) - active_forest <- createForest(num_trees, 1, T) -} +:::: -# Initialize the leaves of each tree in the forest -active_forest$prepare_for_sampler(forest_dataset, outcome, forest_model, outcome_model_type, mean(resid)) -active_forest$adjust_residual(forest_dataset, outcome, forest_model, ifelse(outcome_model_type==1, T, F), F) +Next, we initialize random number generator objects, which are 
essentially wrappers around `std::mt19937`, which can optionally be seeded for reproducibility. + +::::{.panel-tabset group="language"} + +## R + +```{r} +rng <- createCppRNG(random_seed) ``` ## Python ```{python} -# Dataset: covariates + basis -dataset = Dataset() -dataset.add_covariates(X) -dataset.add_basis(W) +cpp_rng = RNG(random_seed) +``` -# Residual -residual = Residual(resid) +:::: -# Random number generator -cpp_rng = RNG(random_seed) +Next, we initialize the configuration objects. Note that each config has default values so these parameters do not all need to be explicitly set. -# Forest container (stores samples) and active forest (updated each iteration) -forest_container = ForestContainer(num_trees, leaf_dimension, False, False) -active_forest = Forest(num_trees, leaf_dimension, False, False) +::::{.panel-tabset group="language"} -# Sampler config -global_model_config = GlobalModelConfig(global_error_variance=global_variance_init) +## R + +```{r} +# Set parameters +outcome_model_type <- 0 +leaf_dimension <- 1 +num_trees <- 200 +feature_types <- as.integer(rep(0, p)) # 0 = numeric +variable_weights <- rep(1 / p, p) + +# Initialize config objects +forest_model_config <- createForestModelConfig( + feature_types = feature_types, + num_trees = num_trees, + min_samples_leaf = 5, + num_features = p, + num_observations = n, + variable_weights = variable_weights, + leaf_dimension = leaf_dimension, + leaf_model_type = outcome_model_type +) +global_model_config <- createGlobalModelConfig( + global_error_variance = sigma2_init +) +``` + +## Python + +```{python} +# Set parameters +outcome_model_type = 0 +leaf_dimension = 1 +num_trees = 200 +feature_types = np.repeat(0, p).astype(int) # 0 = numeric +var_weights = np.repeat(1 / p, p) + +# Initialize config objects forest_model_config = ForestModelConfig( + feature_types=feature_types, num_trees=num_trees, - num_features=p_X, + num_features=p, num_observations=n, - feature_types=feature_types, 
variable_weights=var_weights, leaf_dimension=leaf_dimension, - alpha=alpha, - beta=beta, - min_samples_leaf=min_samples_leaf, - max_depth=max_depth, - leaf_model_type=leaf_model, - leaf_model_scale=leaf_prior_scale, - cutpoint_grid_size=cutpoint_grid_size, + leaf_model_type=outcome_model_type, ) -forest_sampler = ForestSampler(dataset, global_model_config, forest_model_config) -global_var_model = GlobalVarianceModel() -leaf_var_model = LeafVarianceModel() - -# Initialize leaves of each tree -forest_init_val = np.zeros(leaf_dimension) -forest_sampler.prepare_for_sampler(dataset, residual, active_forest, leaf_model, forest_init_val) +global_model_config = GlobalModelConfig(global_error_variance=sigma2_init) ``` :::: -Prepare to run the sampler. + +Next, we initialize forest model / sampler objects which dispatch the sampling algorithms ::::{.panel-tabset group="language"} ## R ```{r} -num_warmstart <- 10 -num_mcmc <- 100 -num_samples <- num_warmstart + num_mcmc -global_var_samples <- c(global_variance_init, rep(0, num_samples)) -leaf_scale_samples <- c(tau_init, rep(0, num_samples)) +forest_model <- createForestModel(forest_dataset, forest_model_config, global_model_config) ``` ## Python ```{python} -num_warmstart = 10 -num_mcmc = 100 -num_samples = num_warmstart + num_mcmc -global_var_samples = np.concatenate([[global_variance_init], np.zeros(num_samples)]) -leaf_scale_samples = np.concatenate([[tau_init], np.zeros(num_samples)]) +forest_sampler = ForestSampler( + forest_dataset, + global_model_config, + forest_model_config +) ``` :::: -Run the grow-from-root sampler to "warm-start" BART (@he2023stochastic). +Initialize both the (empty) container of retained forest samples and the "active forest." + +We set the leaf node values for every (single-node) tree in the active forest so that they sum to the mean of the scaled outcome (which is 0 since it was centered). 
::::{.panel-tabset group="language"} ## R ```{r} -for (i in 1:num_warmstart) { - # Sample forest - forest_model$sample_one_iteration( - forest_dataset, outcome, forest_samples, active_forest, rng, - forest_model_config, global_model_config, keep_forest = T, gfr = T - ) +# Create forest container and active forest +forest_samples <- createForestSamples(num_trees, 1, T) +active_forest <- createForest(num_trees, 1, T) + +# Initialize the leaves of each tree in the active forest +leaf_init <- mean(y_standardized) +active_forest$prepare_for_sampler( + forest_dataset, + outcome, + forest_model, + outcome_model_type, + leaf_init +) +``` - # Sample global variance parameter - current_sigma2 <- sampleGlobalErrorVarianceOneIteration( - outcome, forest_dataset, rng, nu, lambda - ) - global_var_samples[i+1] <- current_sigma2 - global_model_config$update_global_error_variance(current_sigma2) +## Python - # Sample leaf node variance parameter and update `leaf_prior_scale` - leaf_scale_samples[i+1] <- sampleLeafVarianceOneIteration( - active_forest, rng, a_leaf, b_leaf - ) - leaf_prior_scale[1,1] <- leaf_scale_samples[i+1] - forest_model_config$update_leaf_model_scale(leaf_prior_scale) -} +```{python} +# Create forest container and active forest +forest_container = ForestContainer(num_trees, leaf_dimension, True, False) +active_forest = Forest(num_trees, leaf_dimension, True, False) + +# Initialize the leaves of each tree in the active forest +leaf_init = np.mean(y_standardized, keepdims=True) +forest_sampler.prepare_for_sampler( + forest_dataset, + residual, + active_forest, + outcome_model_type, + leaf_init, +) +``` + +:::: + +We prepare to run the sampler by initializing empty containers for all of the parametric components of the model (and other intermediate values we track such as RMSE and predicted values). 
+ +::::{.panel-tabset group="language"} + +## R + +```{r} +num_burnin <- 3000 +num_mcmc <- 1000 +sigma2_samples <- rep(NA, num_mcmc) +a2_samples <- rep(NA, num_mcmc) +tau2_samples <- rep(NA, num_mcmc) +phi_i_samples <- matrix(NA, n, num_mcmc) +rmse_samples <- rep(0, num_mcmc) +fhat_samples <- matrix(0, n, num_mcmc) +current_sigma2 <- sigma2_init +current_a2 <- a2_init +current_tau2 <- tau2_init +current_phi_i <- phi_i_init ``` ## Python ```{python} -for i in range(num_warmstart): - forest_sampler.sample_one_iteration( - forest_container, active_forest, dataset, residual, cpp_rng, - global_model_config, forest_model_config, True, True, 1, # keep_forest=True, gfr=True, num_threads=1 - ) - current_sigma2 = global_var_model.sample_one_iteration(residual, cpp_rng, a_global, b_global) - global_var_samples[i + 1] = current_sigma2 - leaf_scale_samples[i + 1] = leaf_var_model.sample_one_iteration(active_forest, cpp_rng, a_leaf, b_leaf) - leaf_prior_scale[0, 0] = leaf_scale_samples[i + 1] - forest_model_config.update_leaf_model_scale(leaf_prior_scale) +num_burnin = 3000 +num_mcmc = 1000 +sigma2_samples = np.empty(num_mcmc) +a2_samples = np.empty(num_mcmc) +tau2_samples = np.empty(num_mcmc) +phi_i_samples = np.empty((n, num_mcmc)) +rmse_samples = np.empty(num_mcmc) +fhat_samples = np.empty((n, num_mcmc)) +current_sigma2 = sigma2_init +current_a2 = a2_init +current_tau2 = tau2_init +current_phi_i = phi_i_init ``` :::: -Pick up from the last GFR forest (and associated global variance / leaf scale -parameters) with an MCMC sampler. 
+Run an MCMC sampler ::::{.panel-tabset group="language"} ## R ```{r} -for (i in (num_warmstart+1):num_samples) { +for (i in 1:(num_burnin + num_mcmc)) { + keep_sample <- i > num_burnin + # Sample forest forest_model$sample_one_iteration( - forest_dataset, outcome, forest_samples, active_forest, rng, - forest_model_config, global_model_config, keep_forest = T, gfr = F + forest_dataset, + outcome, + forest_samples, + active_forest, + rng, + forest_model_config, + global_model_config, + keep_forest = keep_sample, + gfr = F, + num_threads = 1 ) - # Sample global variance parameter - current_sigma2 <- sampleGlobalErrorVarianceOneIteration( - outcome, forest_dataset, rng, nu, lambda + # Sample local variance parameters + current_phi_i <- sample_phi_i( + y_standardized, + forest_dataset, + active_forest, + current_a2, + current_tau2, + nu ) - global_var_samples[i+1] <- current_sigma2 - global_model_config$update_global_error_variance(current_sigma2) - # Sample leaf node variance parameter and update `leaf_prior_scale` - leaf_scale_samples[i+1] <- sampleLeafVarianceOneIteration( - active_forest, rng, a_leaf, b_leaf + # Sample a2 + current_a2 <- sample_a2( + y_standardized, + forest_dataset, + active_forest, + current_phi_i ) - leaf_prior_scale[1,1] <- leaf_scale_samples[i+1] - forest_model_config$update_leaf_model_scale(leaf_prior_scale) + if (keep_sample) { + a2_samples[i - num_burnin] <- current_a2 * y_std^2 + } + + # Sample tau2 + current_tau2 <- sample_tau2(current_phi_i, nu) + if (keep_sample) { + tau2_samples[i - num_burnin] <- current_tau2 * y_std^2 + sigma2_samples[i - num_burnin] <- current_tau2 * current_a2 * y_std^2 + } + + # Update observation-specific variance weights + forest_dataset$update_variance_weights(current_phi_i * current_a2) + + # Compute in-sample RMSE and cache mean function samples + if (keep_sample) { + yhat_forest <- active_forest$predict(forest_dataset) * y_std + y_bar + error <- (m_x - yhat_forest) + rmse_samples[i - num_burnin] <- 
sqrt(mean(error * error)) + fhat_samples[, i - num_burnin] <- yhat_forest + } } ``` ## Python ```{python} -for i in range(num_warmstart, num_samples): +keep_sample = False +for i in range(num_burnin + num_mcmc): + if i >= num_burnin: + keep_sample = True + + # Sample from the forest forest_sampler.sample_one_iteration( - forest_container, active_forest, dataset, residual, cpp_rng, - global_model_config, forest_model_config, True, False, 1, # keep_forest=True, gfr=False, num_threads=1 + forest_container=forest_container, + forest=active_forest, + dataset=forest_dataset, + residual=residual, + rng=cpp_rng, + global_config=global_model_config, + forest_config=forest_model_config, + keep_forest=keep_sample, + gfr=False, + num_threads=1 + ) + + # Sample local variance parameters + current_phi_i = sample_phi_i( + y_standardized, + forest_dataset, + active_forest, + current_a2, + current_tau2, + nu, + rng, ) - current_sigma2 = global_var_model.sample_one_iteration(residual, cpp_rng, a_global, b_global) - global_var_samples[i + 1] = current_sigma2 - leaf_scale_samples[i + 1] = leaf_var_model.sample_one_iteration(active_forest, cpp_rng, a_leaf, b_leaf) - leaf_prior_scale[0, 0] = leaf_scale_samples[i + 1] - forest_model_config.update_leaf_model_scale(leaf_prior_scale) + + # Sample a2 + current_a2 = sample_a2( + y_standardized, + forest_dataset, + active_forest, + current_phi_i, + rng, + ) + + # Sample tau2 + current_tau2 = sample_tau2(current_phi_i, nu, rng) + if keep_sample: + tau2_samples[i - num_burnin] = current_tau2 * y_std * y_std + sigma2_samples[i - num_burnin] = current_tau2 * current_a2 * y_std * y_std + + # Update observation-specific variance weights + forest_dataset.update_variance_weights(current_phi_i * current_a2) + + # Compute in-sample RMSE and cache mean function samples + if keep_sample: + yhat_forest = active_forest.predict(forest_dataset) * y_std + y_bar + error = m_x - yhat_forest + rmse_samples[i - num_burnin] = np.sqrt(np.mean(error * error)) + 
fhat_samples[:, i - num_burnin] = yhat_forest ``` :::: -Predict and rescale samples. +Compute posterior mean of the conditional expectations for the non-robust model ::::{.panel-tabset group="language"} ## R ```{r} -# Forest predictions -preds <- forest_samples$predict(forest_dataset)*y_std + y_bar +m_x_hat_posterior_mean <- rowMeans(fhat_samples) +``` + +## Python + +```{python} +m_x_hat_posterior_mean = np.mean(fhat_samples, axis=1) +``` + +:::: + +For comparison, we run the same model without robust errors + +::::{.panel-tabset group="language"} + +## R + +```{r} +# Initial value of global error variance parameter +sigma2_init <- 1.0 + +# Initialize data objects +forest_dataset <- createForestDataset(X) +outcome <- createOutcome(y_standardized) + +# Random number generator (std::mt19937) +rng <- createCppRNG(random_seed) + +# Model configuration +outcome_model_type <- 0 +leaf_dimension <- 1 +num_trees <- 200 +feature_types <- as.integer(rep(0, p)) # 0 = numeric +variable_weights <- rep(1 / p, p) +forest_model_config <- createForestModelConfig( + feature_types = feature_types, + num_trees = num_trees, + num_features = p, + min_samples_leaf = 5, + num_observations = n, + variable_weights = variable_weights, + leaf_dimension = leaf_dimension, + leaf_model_type = outcome_model_type +) +global_model_config <- createGlobalModelConfig( + global_error_variance = sigma2_init +) + +# Forest model object +forest_model <- createForestModel( + forest_dataset, + forest_model_config, + global_model_config +) + +# "Active forest" (which gets updated by the sample) and +# container of forest samples (which is written to when +# a sample is not discarded due to burn-in / thinning) +forest_samples <- createForestSamples(num_trees, 1, T) +active_forest <- createForest(num_trees, 1, T) + +# Initialize the leaves of each tree in the forest +leaf_init <- mean(y_standardized) +active_forest$prepare_for_sampler( + forest_dataset, + outcome, + forest_model, + outcome_model_type, + 
leaf_init +) +active_forest$adjust_residual(forest_dataset, outcome, forest_model, F, F) + +# Prepare to run the sampler +global_var_samples <- rep(NA, num_mcmc) +rmse_samples_non_robust <- rep(0, num_mcmc) +fhat_samples_non_robust <- matrix(0, n, num_mcmc) +current_sigma2 <- sigma2_init -# Global error variance -sigma_samples <- sqrt(global_var_samples)*y_std +# Run the MCMC sampler +for (i in 1:(num_burnin + num_mcmc)) { + keep_sample <- i > num_burnin + + # Sample forest + forest_model$sample_one_iteration( + forest_dataset, + outcome, + forest_samples, + active_forest, + rng, + forest_model_config, + global_model_config, + keep_forest = keep_sample, + gfr = F, + num_threads = 1 + ) + + # Sample global error variance parameter + current_sigma2 <- sampleGlobalErrorVarianceOneIteration( + outcome, + forest_dataset, + rng, + 1, + 1 + ) + global_model_config$update_global_error_variance(current_sigma2) + if (keep_sample) { + global_var_samples[i - num_burnin] <- current_sigma2 * y_std^2 + } + + # Compute in-sample RMSE + if (keep_sample) { + yhat_forest <- active_forest$predict(forest_dataset) * y_std + y_bar + error <- (m_x - yhat_forest) + rmse_samples_non_robust[i - num_burnin] <- sqrt(mean(error * error)) + fhat_samples_non_robust[, i - num_burnin] <- yhat_forest + } +} ``` ## Python ```{python} -# Forest predictions: shape (n, num_samples); rescale to original y scale -preds = forest_container.predict(dataset) * y_std + y_bar +# Initial value of global error variance parameter +sigma2_init = 1.0 -# Global error variance (sigma, not sigma^2) -sigma_samples = np.sqrt(global_var_samples) * y_std +# Initialize data objects +forest_dataset = Dataset() +forest_dataset.add_covariates(X) +residual = Residual(y_standardized) + +# Random number generator (std::mt19937) +cpp_rng = RNG(random_seed) + +# Model configuration +outcome_model_type = 0 +leaf_dimension = 1 +num_trees = 200 +feature_types = np.repeat(0, p).astype(int) # 0 = numeric +var_weights = np.repeat(1 / p, 
p) +global_model_config = GlobalModelConfig(global_error_variance=sigma2_init) +forest_model_config = ForestModelConfig( + feature_types=feature_types, + num_trees=num_trees, + num_features=p, + num_observations=n, + variable_weights=var_weights, + leaf_dimension=leaf_dimension, + leaf_model_type=outcome_model_type, +) + +# Forest model object +forest_sampler = ForestSampler(forest_dataset, global_model_config, forest_model_config) + +# "Active forest" (which gets updated by the sample) and +# container of forest samples (which is written to when +# a sample is not discarded due to burn-in / thinning) +active_forest = Forest(num_trees, leaf_dimension, True, False) +forest_container = ForestContainer(num_trees, leaf_dimension, True, False) + +# Initialize the leaves of each tree in the mean forest +leaf_init = np.mean(y_standardized, keepdims=True) +forest_sampler.prepare_for_sampler( + forest_dataset, + residual, + active_forest, + outcome_model_type, + leaf_init, +) + +# Global error variance model +global_var_model = GlobalVarianceModel() + +# Prepare to run the sampler +num_burnin = 3000 +num_mcmc = 1000 +sigma2_samples_non_robust = np.empty(num_mcmc) +rmse_samples_non_robust = np.empty(num_mcmc) +fhat_samples_non_robust = np.empty((n, num_mcmc)) +current_sigma2 = sigma2_init + +# Run the MCMC sampler +keep_sample = False +for i in range(num_burnin + num_mcmc): + if i >= num_burnin: + keep_sample = True + + # Sample from the forest + forest_sampler.sample_one_iteration( + forest_container=forest_container, + forest=active_forest, + dataset=forest_dataset, + residual=residual, + rng=cpp_rng, + global_config=global_model_config, + forest_config=forest_model_config, + keep_forest=keep_sample, + gfr=False, + num_threads=1 + ) + + # Sample global variance parameter + current_sigma2 = global_var_model.sample_one_iteration(residual, cpp_rng, 1.0, 1.0) + global_model_config.update_global_error_variance(current_sigma2) + if keep_sample: + sigma2_samples_non_robust[i - 
num_burnin] = current_sigma2 * y_std * y_std + + # Compute in-sample RMSE and cache mean function samples + if keep_sample: + yhat_forest = active_forest.predict(forest_dataset) * y_std + y_bar + error = m_x - yhat_forest + rmse_samples_non_robust[i - num_burnin] = np.sqrt(np.mean(error * error)) + fhat_samples_non_robust[:, i - num_burnin] = yhat_forest ``` :::: ## Results -Inspect the initial samples obtained via "grow-from-root" (@he2023stochastic). +Plot RMSE samples side-by-side ::::{.panel-tabset group="language"} ## R ```{r} -plot(sigma_samples[1:num_warmstart], ylab="sigma") -plot(rowMeans(preds[,1:num_warmstart]), y, pch=16, - cex=0.75, xlab = "pred", ylab = "actual") -abline(0,1,col="red",lty=2,lwd=2.5) +par(mar = c(4, 4, 0.5, 0.5)) +y_bounds <- range(c(rmse_samples, rmse_samples_non_robust)) +y_bounds[2] <- y_bounds[2] * 1.25 +plot( + rmse_samples, + type = "l", + col = "blue", + ylim = y_bounds, + ylab = "In-Sample RMSE", + xlab = "Iteration" +) +lines(rmse_samples_non_robust, col = "red") +legend( + "topleft", + legend = c("Gaussian Errors", "t Errors"), + col = c("red", "blue"), + lty = 1 +) ``` ## Python ```{python} -gfr_preds = preds[:, :num_warmstart] -fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) -ax1.plot(sigma_samples[1:num_warmstart + 1]) -ax1.set_ylabel("sigma") -ax1.set_title("GFR: Global Error Scale") -ax2.scatter(gfr_preds.mean(axis=1), y, s=10, alpha=0.5) -lo = min(gfr_preds.mean(axis=1).min(), y.min()) -hi = max(gfr_preds.mean(axis=1).max(), y.max()) -ax2.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -ax2.set_xlabel("pred") -ax2.set_ylabel("actual") -ax2.set_title("GFR: Predicted vs Actual") +y_bounds = ( + np.min([rmse_samples, rmse_samples_non_robust]) * 0.8, + np.max([rmse_samples, rmse_samples_non_robust]) * 1.25, +) +plt.ylim(y_bounds) +plt.plot(rmse_samples, label="t Errors", color="blue") +plt.plot( + rmse_samples_non_robust, + label="Gaussian Errors", + color="red", +) +plt.ylabel("In-Sample RMSE") 
+plt.xlabel("Iteration") +plt.legend(loc="upper left") plt.tight_layout() plt.show() ``` :::: -Inspect the BART samples obtained after "warm-starting". +Compute the posterior mean of conditional expectations for the non-robust model and compare to the robust model ::::{.panel-tabset group="language"} ## R ```{r} -plot(sigma_samples[(num_warmstart+1):num_samples], ylab="sigma") -plot(rowMeans(preds[,(num_warmstart+1):num_samples]), y, pch=16, - cex=0.75, xlab = "pred", ylab = "actual") -abline(0,1,col="red",lty=2,lwd=2.5) +m_x_hat_posterior_mean_non_robust <- rowMeans(fhat_samples_non_robust) +par(mar = c(4, 4, 0.5, 0.5)) +y_bounds <- range(m_x) +y_bounds[2] <- y_bounds[2] * 1.1 +plot( + m_x_hat_posterior_mean_non_robust, + m_x, + pch = 20, + col = 'lightgray', + xlab = 'Predicted f(x)', + ylab = 'True f(x)', + ylim = y_bounds +) +abline(0, 1) +points(m_x_hat_posterior_mean, m_x, pch = 20, cex = 0.5) +legend( + "topleft", + legend = c('Gaussian errors', 't errors'), + pch = c(20, 20), + col = c('lightgray', 'black') +) ``` ## Python ```{python} -mcmc_preds = preds[:, num_warmstart:num_samples] -fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4)) -ax1.plot(sigma_samples[num_warmstart + 1:num_samples + 1]) -ax1.set_ylabel("sigma") -ax1.set_title("MCMC: Global Error Scale") -ax2.scatter(mcmc_preds.mean(axis=1), y, s=10, alpha=0.5) -lo = min(mcmc_preds.mean(axis=1).min(), y.min()) -hi = max(mcmc_preds.mean(axis=1).max(), y.max()) -ax2.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -ax2.set_xlabel("pred") -ax2.set_ylabel("actual") -ax2.set_title("MCMC: Predicted vs Actual") +m_x_hat_posterior_mean_non_robust = np.mean(fhat_samples_non_robust, axis=1) +margin = 0.05 * (np.max(m_x) - np.min(m_x)) +y_bounds = (np.min(m_x) - margin, np.max(m_x) + margin) +plt.ylim(y_bounds) +plt.scatter( + m_x_hat_posterior_mean_non_robust, m_x, label="Gaussian Errors", color="lightgray" +) +plt.scatter(m_x_hat_posterior_mean, m_x, label="t Errors", color="black") 
+plt.axline((np.mean(m_x), np.mean(m_x)), slope=1, color="black", linestyle=(0, (3, 3))) +plt.ylabel("True f(x)") +plt.xlabel("Predicted f(x)") +plt.legend(loc="upper left") plt.tight_layout() -plt.show() ``` :::: diff --git a/vignettes/ensemble-kernel.qmd b/vignettes/ensemble-kernel.qmd index eb3db0e42..ab399f8b9 100644 --- a/vignettes/ensemble-kernel.qmd +++ b/vignettes/ensemble-kernel.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) @@ -28,14 +28,31 @@ it reserves the numbers 1 - 3, and in turn if tree 2 has 5 leaves, it reserves t numbers 4 - 8 to label its leaves, and so on). For a dataset with $n$ observations, we construct the matrix $W$ as follows: -| Initialize $W$ as a matrix of all zeroes with $n$ rows and as many columns as leaves in the ensemble -| Let `s` = 0 -| **FOR** $j$ **IN** $\left\{1,\dots,m\right\}$: -| Let `num_leaves` be the number of leaves in tree $j$ -| **FOR** $i$ **IN** $\left\{1,\dots,n\right\}$: -| Let `k` be the leaf to which tree $j$ maps observation $i$ -| Set element $W_{i,k+s} = 1$ -| Let `s` = `s + num_leaves` + + +::: {.algorithm style="border-top: 2px solid; border-bottom: 2px solid; padding: 0.6em 1em; margin: 1.5em 0;"} + +Initialize $W$ as a matrix of all zeroes with $n$ rows and as many columns as leaves in the ensemble + +Let `s` = 0 + +For $j$ in $\left\{1,\dots,m\right\}$: + +:::: {style="margin-left: 2em"} +Let `num_leaves` be the number of leaves in tree $j$ + +For $i$ in $\left\{1,\dots,n\right\}$: + +::::: {style="margin-left: 2em"} +Let `k` be the leaf to which tree $j$ maps observation $i$ + +Set element $W_{i,k+s} = 1$ +::::: + +Let `s` = `s + num_leaves` +:::: + +::: This sparse matrix $W$ is a matrix representation of the basis predictions of an ensemble (i.e. 
integrating out the leaf parameters and just analyzing the leaf diff --git a/vignettes/heteroskedastic.qmd b/vignettes/heteroskedastic.qmd index eb443a5cc..12ca5a9ba 100644 --- a/vignettes/heteroskedastic.qmd +++ b/vignettes/heteroskedastic.qmd @@ -10,18 +10,18 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) ``` -This vignette demonstrates how to use the `bart()` function for Bayesian supervised -learning (@chipman2010bart) and causal inference (@hahn2020bayesian), with an -additional "variance forest," for modeling conditional variance (see @murray2021log). +This vignette demonstrates how to configure a "variance forest" in stochtree for modeling conditional variance (see @murray2021log). # Setup +Load necessary packages + ::::{.panel-tabset group="language"} ## R @@ -36,15 +36,31 @@ library(stochtree) import numpy as np import matplotlib.pyplot as plt from stochtree import BARTModel - -rng = np.random.default_rng(101) ``` :::: -# Section 1: Supervised Learning +Set a random seed + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed = 1234 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 1234 +rng = np.random.default_rng(random_seed) +``` + +:::: -## Demo 1: Variance-Only Simulation (Simple DGP) +# Demo 1: Variance-Only Simulation (Simple DGP) Here, we generate data with a constant (zero) mean and a relatively simple covariate-modified variance function. 
@@ -63,34 +79,57 @@ X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ \end{aligned} \end{equation*} -### Simulation +## Simulation + +Generate data from the DGP above ::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data -n <- 500 +n <- 1000 p_x <- 10 -X <- matrix(runif(n*p_x), ncol = p_x) +X <- matrix(runif(n * p_x), ncol = p_x) f_XW <- 0 -s_XW <- ( - ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5) + - ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1) + - ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2) + - ((0.75 <= X[,1]) & (1 > X[,1])) * (3) +s_XW <- (((0 <= X[, 1]) & (0.25 > X[, 1])) * + (0.5) + + ((0.25 <= X[, 1]) & (0.5 > X[, 1])) * (1) + + ((0.5 <= X[, 1]) & (0.75 > X[, 1])) * (2) + + ((0.75 <= X[, 1]) & (1 > X[, 1])) * (3)) +y <- f_XW + rnorm(n, 0, 1) * s_XW +``` + +## Python + +```{python} +n, p_x = 1000, 10 +X = rng.uniform(size=(n, p_x)) +s_XW = ( + ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * 0.5 + + ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * 1.0 + + ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * 2.0 + + ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * 3.0 ) -y <- f_XW + rnorm(n, 0, 1)*s_XW +y = rng.normal(size=n) * s_XW +``` -# Split data into test and train sets +:::: + +Split into train and test sets + +::::{.panel-tabset group="language"} + +## R + +```{r} test_set_pct <- 0.2 -n_test <- round(test_set_pct*n) +n_test <- round(test_set_pct * n) n_train <- n - n_test test_inds <- sort(sample(1:n, n_test, replace = FALSE)) train_inds <- (1:n)[!((1:n) %in% test_inds)] -X_test <- as.data.frame(X[test_inds,]) -X_train <- as.data.frame(X[train_inds,]) +X_test <- as.data.frame(X[test_inds, ]) +X_train <- as.data.frame(X[train_inds, ]) y_test <- y[test_inds] y_train <- y[train_inds] s_x_test <- s_XW[test_inds] @@ -100,34 +139,22 @@ s_x_train <- s_XW[train_inds] ## Python ```{python} -n, p_x = 500, 10 -X = rng.uniform(size=(n, p_x)) -# Note: R's X[,1] = Python's X[:,0] -s_XW = ( - ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * 0.5 + - ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * 1.0 + - ((X[:, 0] >= 0.5) & (X[:, 
0] < 0.75)) * 2.0 + - ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * 3.0 -) -y = rng.normal(size=n) * s_XW - test_set_pct = 0.2 n_test = round(test_set_pct * n) -test_inds = rng.choice(n, n_test, replace=False) +test_inds = rng.choice(n, n_test, replace=False) train_inds = np.setdiff1d(np.arange(n), test_inds) -X_test, X_train = X[test_inds], X[train_inds] -y_test, y_train = y[test_inds], y[train_inds] +X_test, X_train = X[test_inds], X[train_inds] +y_test, y_train = y[test_inds], y[train_inds] s_x_test, s_x_train = s_XW[test_inds], s_XW[train_inds] ``` :::: -### Sampling and Analysis +## Sampling and Analysis -#### Warmstart +We sample four chains of the $\sigma^2(X)$ forest using "warm-start" initialization (@he2023stochastic). -We first sample the $\sigma^2(X)$ ensemble using "warm-start" initialization -(@he2023stochastic). This is the default in `stochtree`. +We use fewer trees for the variance forest than typically used for mean forests, and we disable sampling a global error scale and omit the mean forest by setting `num_trees = 0` in its parameter list. 
::::{.panel-tabset group="language"} @@ -139,26 +166,48 @@ num_burnin <- 0 num_mcmc <- 100 num_trees <- 20 num_samples <- num_gfr + num_burnin + num_mcmc -general_params <- list(sample_sigma2_global = F) +general_params <- list( + sample_sigma2_global = F, + num_chains = 4, + num_threads = 1, + random_seed = random_seed +) mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0) variance_forest_params <- list(num_trees = num_trees) -bart_model_warmstart <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params +bart_model <- stochtree::bart( + X_train = X_train, + y_train = y_train, + X_test = X_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params ) ``` ## Python ```{python} +num_gfr = 10 +num_burnin = 0 +num_mcmc = 100 num_trees = 20 -bart_model_warmstart = BARTModel() -bart_model_warmstart.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=10, num_burnin=0, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, + y_train=y_train, + X_test=X_test, + num_gfr=num_gfr, + num_burnin=num_burnin, + num_mcmc=num_mcmc, + general_params={ + "sample_sigma2_global": False, + "num_threads": 1, + "num_chains": 4, + "random_seed": random_seed, + }, mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0}, variance_forest_params={"num_trees": num_trees}, ) @@ -166,96 +215,50 @@ bart_model_warmstart.sample( :::: -Inspect the MCMC samples. 
+We inspect the model by plotting the true variance function against its forest-based predictions ::::{.panel-tabset group="language"} ## R ```{r} -plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) -``` - -## Python - -```{python} -var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") -plt.show() -``` - -:::: - -#### MCMC - -We now sample the $\sigma^2(X)$ ensemble using MCMC with root initialization (as in -@chipman2010bart). - -::::{.panel-tabset group="language"} - -## R - -```{r} -num_gfr <- 0 -num_burnin <- 1000 -num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0) -variance_forest_params <- list(num_trees = num_trees) -bart_model_mcmc <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params +sigma2_x_hat_test <- predict( + bart_model, + X = X_test, + terms = "variance_forest", + type = "mean" ) -``` - -## Python - -```{python} -bart_model_mcmc = BARTModel() -bart_model_mcmc.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=0, num_burnin=1000, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0}, - variance_forest_params={"num_trees": num_trees}, +plot( + sigma2_x_hat_test, + s_x_test^2, + pch = 16, + cex = 0.75, + xlab = 
"Predicted", + ylab = "Actual", + main = "Variance function" ) -``` - -:::: - -Inspect the MCMC samples. - -::::{.panel-tabset group="language"} - -## R - -```{r} -plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) +abline(0, 1, col = "red", lty = 2, lwd = 2.5) ``` ## Python ```{python} -var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +sigma2_x_hat_test = bart_model.predict(X=X_test, terms="variance_forest", type="mean") +lo, hi = ( + min(sigma2_x_hat_test.min(), (s_x_test**2).min()), + max(sigma2_x_hat_test.max(), (s_x_test**2).max()), +) +plt.scatter(sigma2_x_hat_test, s_x_test**2, s=10, alpha=0.6) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Variance function") plt.show() ``` :::: -## Demo 2: Variance-Only Simulation (Complex DGP) +# Demo 2: Variance-Only Simulation (Complex DGP) Here, we generate data with a constant (zero) mean and a more complex covariate-modified variance function. 
@@ -274,15 +277,16 @@ X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ \end{aligned} \end{equation*} -### Simulation +## Simulation + +We generate data from the DGP above ::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data -n <- 500 +n <- 1000 p_x <- 10 X <- matrix(runif(n*p_x), ncol = p_x) f_XW <- 0 @@ -293,25 +297,12 @@ s_XW <- ( ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) ) y <- f_XW + rnorm(n, 0, 1)*s_XW - -# Split data into test and train sets -test_set_pct <- 0.2 -n_test <- round(test_set_pct*n) -n_train <- n - n_test -test_inds <- sort(sample(1:n, n_test, replace = FALSE)) -train_inds <- (1:n)[!((1:n) %in% test_inds)] -X_test <- as.data.frame(X[test_inds,]) -X_train <- as.data.frame(X[train_inds,]) -y_test <- y[test_inds] -y_train <- y[train_inds] -s_x_test <- s_XW[test_inds] -s_x_train <- s_XW[train_inds] ``` ## Python ```{python} -n, p_x = 500, 10 +n, p_x = 1000, 10 X = rng.uniform(size=(n, p_x)) # R's X[,3] = Python's X[:,2] s_XW = ( @@ -321,149 +312,153 @@ s_XW = ( ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * (3.0 * X[:, 2]) ) y = rng.normal(size=n) * s_XW - -test_set_pct = 0.2 -n_test = round(test_set_pct * n) -test_inds = rng.choice(n, n_test, replace=False) -train_inds = np.setdiff1d(np.arange(n), test_inds) -X_test, X_train = X[test_inds], X[train_inds] -y_test, y_train = y[test_inds], y[train_inds] -s_x_test, s_x_train = s_XW[test_inds], s_XW[train_inds] ``` :::: -### Sampling and Analysis - -#### Warmstart +And split the data into train and test sets ::::{.panel-tabset group="language"} ## R ```{r} -num_trees <- 20 -num_gfr <- 10 -num_burnin <- 0 -num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0, - alpha = 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = num_trees, alpha = 0.95, - beta = 1.25, min_samples_leaf = 1) -bart_model_warmstart <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = 
num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params -) +test_set_pct <- 0.2 +n_test <- round(test_set_pct*n) +n_train <- n - n_test +test_inds <- sort(sample(1:n, n_test, replace = FALSE)) +train_inds <- (1:n)[!((1:n) %in% test_inds)] +X_test <- as.data.frame(X[test_inds,]) +X_train <- as.data.frame(X[train_inds,]) +y_test <- y[test_inds] +y_train <- y[train_inds] +s_x_test <- s_XW[test_inds] +s_x_train <- s_XW[train_inds] ``` ## Python ```{python} -num_trees = 20 -bart_model_warmstart = BARTModel() -bart_model_warmstart.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=10, num_burnin=0, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0, - "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, - variance_forest_params={"num_trees": num_trees, "alpha": 0.95, - "beta": 1.25, "min_samples_leaf": 1}, -) +test_set_pct = 0.2 +n_test = round(test_set_pct * n) +test_inds = rng.choice(n, n_test, replace=False) +train_inds = np.setdiff1d(np.arange(n), test_inds) +X_test, X_train = X[test_inds], X[train_inds] +y_test, y_train = y[test_inds], y[train_inds] +s_x_test, s_x_train = s_XW[test_inds], s_XW[train_inds] ``` :::: -::::{.panel-tabset group="language"} - -## R - -```{r} -plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) -``` - -## Python +## Sampling and Analysis -```{python} -var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); 
plt.title("Variance function") -plt.show() -``` +We sample four chains of the $\sigma^2(X)$ forest using "warm-start" initialization (@he2023stochastic). -:::: - -#### MCMC +We use fewer trees for the variance forest than typically used for mean forests, and we disable sampling a global error scale and omit the mean forest by setting `num_trees = 0` in its parameter list. ::::{.panel-tabset group="language"} ## R ```{r} -num_gfr <- 0 -num_burnin <- 1000 +num_gfr <- 10 +num_burnin <- 0 num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0, - alpha = 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = num_trees, alpha = 0.95, - beta = 1.25, min_samples_leaf = 1) -bart_model_mcmc <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params +num_trees <- 20 +num_samples <- num_gfr + num_burnin + num_mcmc +general_params <- list( + sample_sigma2_global = F, + num_chains = 4, + num_threads = 1, + random_seed = random_seed +) +mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 0) +variance_forest_params <- list(num_trees = num_trees) +bart_model <- stochtree::bart( + X_train = X_train, + y_train = y_train, + X_test = X_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params ) ``` ## Python ```{python} -bart_model_mcmc = BARTModel() -bart_model_mcmc.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=0, num_burnin=1000, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0, - "alpha": 0.95, 
"beta": 2, "min_samples_leaf": 5}, - variance_forest_params={"num_trees": num_trees, "alpha": 0.95, - "beta": 1.25, "min_samples_leaf": 1}, +num_gfr = 10 +num_burnin = 0 +num_mcmc = 100 +num_trees = 20 +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, + y_train=y_train, + X_test=X_test, + num_gfr=num_gfr, + num_burnin=num_burnin, + num_mcmc=num_mcmc, + general_params={ + "sample_sigma2_global": False, + "num_threads": 1, + "num_chains": 4, + "random_seed": random_seed, + }, + mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 0}, + variance_forest_params={"num_trees": num_trees}, ) ``` :::: +We inspect the model by plotting the true variance function against its forest-based predictions + ::::{.panel-tabset group="language"} ## R ```{r} -plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) +sigma2_x_hat_test <- predict( + bart_model, + X = X_test, + terms = "variance_forest", + type = "mean" +) +plot( + sigma2_x_hat_test, + s_x_test^2, + pch = 16, + cex = 0.75, + xlab = "Predicted", + ylab = "Actual", + main = "Variance function" +) +abline(0, 1, col = "red", lty = 2, lwd = 2.5) ``` ## Python ```{python} -var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +sigma2_x_hat_test = bart_model.predict(X=X_test, terms="variance_forest", type="mean") +lo, hi = ( + min(sigma2_x_hat_test.min(), (s_x_test**2).min()), + max(sigma2_x_hat_test.max(), (s_x_test**2).max()), +) +plt.scatter(sigma2_x_hat_test, s_x_test**2, s=10, alpha=0.6) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Variance function") plt.show() ``` :::: 
-## Demo 3: Mean and Variance Simulation (Simple DGP) +# Demo 3: Mean and Variance Function Simulation Here, we generate data with (relatively simple) covariate-modified mean and variance functions. @@ -488,15 +483,16 @@ X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ \end{aligned} \end{equation*} -### Simulation +## Simulation + +Generate data from the DGP above ::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data -n <- 500 +n <- 1000 p_x <- 10 X <- matrix(runif(n*p_x), ncol = p_x) f_XW <- ( @@ -512,27 +508,13 @@ s_XW <- ( ((0.75 <= X[,1]) & (1 > X[,1])) * (3) ) y <- f_XW + rnorm(n, 0, 1)*s_XW - -# Split data into test and train sets -test_set_pct <- 0.2 -n_test <- round(test_set_pct*n) -n_train <- n - n_test -test_inds <- sort(sample(1:n, n_test, replace = FALSE)) -train_inds <- (1:n)[!((1:n) %in% test_inds)] -X_test <- as.data.frame(X[test_inds,]) -X_train <- as.data.frame(X[train_inds,]) -y_test <- y[test_inds] -y_train <- y[train_inds] -f_x_test <- f_XW[test_inds] -s_x_test <- s_XW[test_inds] ``` ## Python ```{python} -n, p_x = 500, 10 +n, p_x = 1000, 10 X = rng.uniform(size=(n, p_x)) -# R's X[,2] = Python's X[:,1]; R's X[,1] = Python's X[:,0] f_XW = ( ((X[:, 1] >= 0) & (X[:, 1] < 0.25)) * (-6) + ((X[:, 1] >= 0.25) & (X[:, 1] < 0.5)) * (-2) + @@ -546,218 +528,17 @@ s_XW = ( ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * 3.0 ) y = f_XW + rng.normal(size=n) * s_XW - -test_set_pct = 0.2 -n_test = round(test_set_pct * n) -test_inds = rng.choice(n, n_test, replace=False) -train_inds = np.setdiff1d(np.arange(n), test_inds) -X_test, X_train = X[test_inds], X[train_inds] -y_test, y_train = y[test_inds], y[train_inds] -f_x_test = f_XW[test_inds] -s_x_test = s_XW[test_inds] -``` - -:::: - -### Sampling and Analysis - -#### Warmstart - -::::{.panel-tabset group="language"} - -## R - -```{r} -num_gfr <- 10 -num_burnin <- 0 -num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, - alpha 
= 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = 50, alpha = 0.95, - beta = 1.25, min_samples_leaf = 5) -bart_model_warmstart <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params -) -``` - -## Python - -```{python} -bart_model_warmstart = BARTModel() -bart_model_warmstart.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=10, num_burnin=0, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, - "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, - variance_forest_params={"num_trees": 50, "alpha": 0.95, - "beta": 1.25, "min_samples_leaf": 5}, -) -``` - -:::: - -::::{.panel-tabset group="language"} - -## R - -```{r} -plot(rowMeans(bart_model_warmstart$y_hat_test), y_test, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") -abline(0,1,col="red",lty=2,lwd=2.5) -plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) -``` - -## Python - -```{python} -mean_pred = bart_model_warmstart.y_hat_test.mean(axis=1) -lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) -plt.scatter(mean_pred, y_test, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") -plt.show() - -var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", 
linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") -plt.show() -``` - -:::: - -#### MCMC - -::::{.panel-tabset group="language"} - -## R - -```{r} -num_gfr <- 0 -num_burnin <- 1000 -num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, - alpha = 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = 50, alpha = 0.95, - beta = 1.25, min_samples_leaf = 5) -bart_model_mcmc <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params -) -``` - -## Python - -```{python} -bart_model_mcmc = BARTModel() -bart_model_mcmc.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=0, num_burnin=1000, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, - "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, - variance_forest_params={"num_trees": 50, "alpha": 0.95, - "beta": 1.25, "min_samples_leaf": 5}, -) -``` - -:::: - -::::{.panel-tabset group="language"} - -## R - -```{r} -plot(rowMeans(bart_model_mcmc$y_hat_test), y_test, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") -abline(0,1,col="red",lty=2,lwd=2.5) -plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) -``` - -## Python - -```{python} -mean_pred = bart_model_mcmc.y_hat_test.mean(axis=1) -lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) -plt.scatter(mean_pred, y_test, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) 
-plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") -plt.show() - -var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") -plt.show() ``` :::: -## Demo 4: Mean and Variance Simulation (Complex DGP) - -Here, we generate data with more complex covariate-modified mean and variance -functions. - -\begin{equation*} -\begin{aligned} -y &= f(X) + \sigma(X) \epsilon\\ -f(X) &= \begin{cases} --6X_4 & X_2 \geq 0 \text{ and } X_2 < 0.25\\ --2X_4 & X_2 \geq 0.25 \text{ and } X_2 < 0.5\\ -2X_4 & X_2 \geq 0.5 \text{ and } X_2 < 0.75\\ -6X_4 & X_2 \geq 0.75 \text{ and } X_2 < 1\\ -\end{cases}\\ -\sigma^2(X) &= \begin{cases} -0.25X_3^2 & X_1 \geq 0 \text{ and } X_1 < 0.25\\ -1X_3^2 & X_1 \geq 0.25 \text{ and } X_1 < 0.5\\ -4X_3^2 & X_1 \geq 0.5 \text{ and } X_1 < 0.75\\ -9X_3^2 & X_1 \geq 0.75 \text{ and } X_1 < 1\\ -\end{cases}\\ -X_1,\dots,X_p &\sim \text{U}\left(0,1\right)\\ -\epsilon &\sim \mathcal{N}\left(0,1\right) -\end{aligned} -\end{equation*} - -### Simulation +Split the data into train and test sets ::::{.panel-tabset group="language"} ## R ```{r} -# Generate the data -n <- 500 -p_x <- 10 -X <- matrix(runif(n*p_x), ncol = p_x) -f_XW <- ( - ((0 <= X[,2]) & (0.25 > X[,2])) * (-6*X[,4]) + - ((0.25 <= X[,2]) & (0.5 > X[,2])) * (-2*X[,4]) + - ((0.5 <= X[,2]) & (0.75 > X[,2])) * (2*X[,4]) + - ((0.75 <= X[,2]) & (1 > X[,2])) * (6*X[,4]) -) -s_XW <- ( - ((0 <= X[,1]) & (0.25 > X[,1])) * (0.5*X[,3]) + - ((0.25 <= X[,1]) & (0.5 > X[,1])) * (1*X[,3]) + - ((0.5 <= X[,1]) & (0.75 > X[,1])) * (2*X[,3]) + - ((0.75 <= X[,1]) & (1 > X[,1])) * (3*X[,3]) -) -y <- f_XW + rnorm(n, 0, 1)*s_XW - -# Split data into test and train sets test_set_pct <- 0.2 n_test <- 
round(test_set_pct*n) n_train <- n - n_test @@ -774,23 +555,6 @@ s_x_test <- s_XW[test_inds] ## Python ```{python} -n, p_x = 500, 10 -X = rng.uniform(size=(n, p_x)) -# R's X[,2]=X[:,1], X[,4]=X[:,3], X[,1]=X[:,0], X[,3]=X[:,2] -f_XW = ( - ((X[:, 1] >= 0) & (X[:, 1] < 0.25)) * (-6 * X[:, 3]) + - ((X[:, 1] >= 0.25) & (X[:, 1] < 0.5)) * (-2 * X[:, 3]) + - ((X[:, 1] >= 0.5) & (X[:, 1] < 0.75)) * (2 * X[:, 3]) + - ((X[:, 1] >= 0.75) & (X[:, 1] < 1.0)) * (6 * X[:, 3]) -) -s_XW = ( - ((X[:, 0] >= 0) & (X[:, 0] < 0.25)) * (0.5 * X[:, 2]) + - ((X[:, 0] >= 0.25) & (X[:, 0] < 0.5)) * (1.0 * X[:, 2]) + - ((X[:, 0] >= 0.5) & (X[:, 0] < 0.75)) * (2.0 * X[:, 2]) + - ((X[:, 0] >= 0.75) & (X[:, 0] < 1.0)) * (3.0 * X[:, 2]) -) -y = f_XW + rng.normal(size=n) * s_XW - test_set_pct = 0.2 n_test = round(test_set_pct * n) test_inds = rng.choice(n, n_test, replace=False) train_inds = np.setdiff1d(np.arange(n), test_inds) @@ -803,9 +567,9 @@ s_x_test = s_XW[test_inds] :::: -### Sampling and Analysis +## Sampling and Analysis -#### Warmstart +As above, we sample four chains of the $\sigma^2(X)$ forest using "warm-start" initialization (@he2023stochastic), except that here we retain the mean forest rather than omitting it by setting `num_trees = 0` in its parameter list.
::::{.panel-tabset group="language"} @@ -815,137 +579,154 @@ s_x_test = s_XW[test_inds] num_gfr <- 10 num_burnin <- 0 num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, - alpha = 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = 50, alpha = 0.95, - beta = 1.25, min_samples_leaf = 5) -bart_model_warmstart <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params +general_params <- list( + sample_sigma2_global = F, + num_threads = 1, + num_chains = 4, + random_seed = random_seed +) +mean_forest_params <- list( + sample_sigma2_leaf = F, + num_trees = 50, + alpha = 0.95, + beta = 2, + min_samples_leaf = 5 +) +variance_forest_params <- list( + num_trees = 50, + alpha = 0.95, + beta = 1.25, + min_samples_leaf = 5 +) +bart_model <- stochtree::bart( + X_train = X_train, + y_train = y_train, + X_test = X_test, + num_gfr = num_gfr, + num_burnin = num_burnin, + num_mcmc = num_mcmc, + general_params = general_params, + mean_forest_params = mean_forest_params, + variance_forest_params = variance_forest_params ) ``` ## Python ```{python} -bart_model_warmstart = BARTModel() -bart_model_warmstart.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=10, num_burnin=0, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, - "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, - variance_forest_params={"num_trees": 50, "alpha": 0.95, - "beta": 1.25, "min_samples_leaf": 5}, +bart_model = BARTModel() +bart_model.sample( + X_train=X_train, + y_train=y_train, + X_test=X_test, + num_gfr=10, + num_burnin=0, + num_mcmc=100, + general_params={ + 
"sample_sigma2_global": False, + "num_threads": 1, + "num_chains": 4, + "random_seed": random_seed, + }, + mean_forest_params={ + "sample_sigma2_leaf": False, + "num_trees": 50, + "alpha": 0.95, + "beta": 2, + "min_samples_leaf": 5, + }, + variance_forest_params={ + "num_trees": 50, + "alpha": 0.95, + "beta": 1.25, + "min_samples_leaf": 5, + }, ) ``` :::: +We inspect the model by plotting the true variance function against the variance forest predictions + ::::{.panel-tabset group="language"} ## R ```{r} -plot(rowMeans(bart_model_warmstart$y_hat_test), y_test, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") -abline(0,1,col="red",lty=2,lwd=2.5) -plot(rowMeans(bart_model_warmstart$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) +sigma2_x_hat_test <- predict( + bart_model, + X = X_test, + terms = "variance_forest", + type = "mean" +) +plot( + sigma2_x_hat_test, + s_x_test^2, + pch = 16, + cex = 0.75, + xlab = "Predicted", + ylab = "Actual", + main = "Variance function" +) +abline(0, 1, col = "red", lty = 2, lwd = 2.5) ``` ## Python ```{python} -mean_pred = bart_model_warmstart.y_hat_test.mean(axis=1) -lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) -plt.scatter(mean_pred, y_test, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") -plt.show() - -var_pred = bart_model_warmstart.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +sigma2_x_hat_test = bart_model.predict(X=X_test, terms="variance_forest", type="mean") +lo, hi = ( + min(sigma2_x_hat_test.min(), (s_x_test**2).min()), + max(sigma2_x_hat_test.max(), (s_x_test**2).max()), +) +plt.scatter(sigma2_x_hat_test, s_x_test**2, 
s=10, alpha=0.6) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Variance function") plt.show() ``` :::: -#### MCMC +We also plot the true outcome against mean forest predictions ::::{.panel-tabset group="language"} ## R ```{r} -num_gfr <- 0 -num_burnin <- 1000 -num_mcmc <- 100 -general_params <- list(sample_sigma2_global = F) -mean_forest_params <- list(sample_sigma2_leaf = F, num_trees = 50, - alpha = 0.95, beta = 2, min_samples_leaf = 5) -variance_forest_params <- list(num_trees = 50, alpha = 0.95, - beta = 1.25, min_samples_leaf = 5) -bart_model_mcmc <- stochtree::bart( - X_train = X_train, y_train = y_train, X_test = X_test, - num_gfr = num_gfr, num_burnin = num_burnin, num_mcmc = num_mcmc, - general_params = general_params, mean_forest_params = mean_forest_params, - variance_forest_params = variance_forest_params +y_hat_test <- predict( + bart_model, + X = X_test, + terms = "y_hat", + type = "mean" ) -``` - -## Python - -```{python} -bart_model_mcmc = BARTModel() -bart_model_mcmc.sample( - X_train=X_train, y_train=y_train, X_test=X_test, - num_gfr=0, num_burnin=1000, num_mcmc=100, - general_params={"num_threads": 1, "sample_sigma2_global": False}, - mean_forest_params={"sample_sigma2_leaf": False, "num_trees": 50, - "alpha": 0.95, "beta": 2, "min_samples_leaf": 5}, - variance_forest_params={"num_trees": 50, "alpha": 0.95, - "beta": 1.25, "min_samples_leaf": 5}, +plot( + y_hat_test, + y_test, + pch = 16, + cex = 0.75, + xlab = "Predicted", + ylab = "Actual", + main = "Outcome" ) -``` - -:::: - -::::{.panel-tabset group="language"} - -## R - -```{r} -plot(rowMeans(bart_model_mcmc$y_hat_test), y_test, - pch=16, cex=0.75, xlab = "pred", ylab = "actual", main = "mean function") -abline(0,1,col="red",lty=2,lwd=2.5) -plot(rowMeans(bart_model_mcmc$sigma2_x_hat_test), s_x_test^2, - pch=16, cex=0.75, xlab = 
"pred", ylab = "actual", main = "variance function") -abline(0,1,col="red",lty=2,lwd=2.5) +abline(0, 1, col = "red", lty = 2, lwd = 2.5) ``` ## Python ```{python} -mean_pred = bart_model_mcmc.y_hat_test.mean(axis=1) -lo, hi = min(mean_pred.min(), y_test.min()), max(mean_pred.max(), y_test.max()) -plt.scatter(mean_pred, y_test, s=10, alpha=0.6) -plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Mean function") -plt.show() - -var_pred = bart_model_mcmc.sigma2_x_test.mean(axis=1) -lo, hi = min(var_pred.min(), (s_x_test**2).min()), max(var_pred.max(), (s_x_test**2).max()) -plt.scatter(var_pred, s_x_test**2, s=10, alpha=0.6) +y_hat_test = bart_model.predict(X=X_test, terms="y_hat", type="mean") +lo, hi = ( + min(y_hat_test.min(), y_test.min()), + max(y_hat_test.max(), y_test.max()), +) +plt.scatter(y_hat_test, y_test, s=10, alpha=0.6) plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) -plt.xlabel("Predicted"); plt.ylabel("Actual"); plt.title("Variance function") +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Outcome") plt.show() ``` diff --git a/vignettes/iv.qmd b/vignettes/iv.qmd index 1f21c6c05..13b722159 100644 --- a/vignettes/iv.qmd +++ b/vignettes/iv.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git a/vignettes/multi-chain.qmd b/vignettes/multi-chain.qmd index beed218e4..40204324a 100644 --- a/vignettes/multi-chain.qmd +++ b/vignettes/multi-chain.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git 
a/vignettes/multivariate-bcf.qmd b/vignettes/multivariate-bcf.qmd index 90b07e2d7..1e239d1f5 100644 --- a/vignettes/multivariate-bcf.qmd +++ b/vignettes/multivariate-bcf.qmd @@ -9,29 +9,28 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) ``` -BCF extended to vector-valued (multivariate) treatments, estimating heterogeneous -effects for multiple treatment arms simultaneously. +BCF extended to vector-valued (multivariate) treatments, estimating heterogeneous effects for multiple treatment arms simultaneously. ## Background -When treatments are multivariate — such as continuous dose vectors or multiple -binary arms — the standard BCF model extends to: +When treatments are multivariate — such as continuous dose vectors or multiple binary arms — the standard BCF model extends to $$ Y_i = \mu(X_i) + \tau(X_i)^\top Z_i + \epsilon_i $$ -where $Z_i \in \mathbb{R}^p$ and $\tau(X_i) \in \mathbb{R}^p$ is a vector of -covariate-varying treatment effects. +where $Z_i \in \mathbb{R}^p$ and $\tau(X_i) \in \mathbb{R}^p$ is a vector of covariate-dependent treatment effects. 
## Setup +Load necessary packages + ::::{.panel-tabset group="language"} ## R @@ -45,15 +44,32 @@ library(stochtree) ```{python} import matplotlib.pyplot as plt import numpy as np -import pandas as pd -import seaborn as sns from sklearn.model_selection import train_test_split - from stochtree import BCFModel ``` :::: +Set a seed for reproducibility + +::::{.panel-tabset group="language"} + +## R + +```{r} +random_seed <- 4321 +set.seed(random_seed) +``` + +## Python + +```{python} +random_seed = 4321 +rng = np.random.default_rng(random_seed) +``` + +:::: + ## Data Simulation ::::{.panel-tabset group="language"} @@ -61,12 +77,15 @@ from stochtree import BCFModel ## R ```{r} -# Generate covariates and propensity scores -n <- 500 +# Generate covariates, propensities, and treatments +n <- 1000 p_X <- 5 X <- matrix(runif(n * p_X), nrow = n, ncol = p_X) pi_X <- cbind(0.25 + 0.5 * X[, 1], 0.75 - 0.5 * X[, 2]) -Z <- cbind(rbinom(n, 1, pi_X[, 1]), rbinom(n, 1, pi_X[, 2])) +Z <- cbind( + as.numeric(rbinom(n, 1, pi_X[, 1])), + as.numeric(rbinom(n, 1, pi_X[, 2])) +) # Define outcome mean functions (prognostic and treatment effects) mu_X <- pi_X[, 1] * 5 + pi_X[, 2] * 2 + 2 * X[, 3] @@ -80,15 +99,12 @@ y <- mu_X + treatment_term + rnorm(n) ## Python ```{python} -# RNG -rng = np.random.default_rng() - -# Generate covariates and basis -n = 500 +# Generate covariates, propensities, and treatments +n = 1000 p_X = 5 X = rng.uniform(0, 1, (n, p_X)) pi_X = np.c_[0.25 + 0.5 * X[:, 0], 0.75 - 0.5 * X[:, 1]] -Z = rng.binomial(1, pi_X, (n, 2)) +Z = rng.binomial(1, pi_X, (n, 2)).astype(float) # Define the outcome mean functions (prognostic and treatment effects) mu_X = pi_X[:, 0] * 5 + pi_X[:, 1] * 2 + 2 * X[:, 2] @@ -102,31 +118,35 @@ y = mu_X + treatment_term + epsilon :::: +Split the data into train and test sets + ::::{.panel-tabset group="language"} ## R ```{r} -n_test <- round(n * 0.5) +n_test <- round(n * 0.2) test_inds <- sort(sample(seq_len(n), n_test, replace = FALSE)) train_inds 
<- setdiff(seq_len(n), test_inds) X_train <- X[train_inds, ] -X_test <- X[test_inds, ] +X_test <- X[test_inds, ] Z_train <- Z[train_inds, ] -Z_test <- Z[test_inds, ] +Z_test <- Z[test_inds, ] y_train <- y[train_inds] -y_test <- y[test_inds] -mu_train <- mu_X[train_inds] -mu_test <- mu_X[test_inds] +y_test <- y[test_inds] +pi_train <- pi_X[train_inds, ] +pi_test <- pi_X[test_inds, ] +mu_train <- mu_X[train_inds] +mu_test <- mu_X[test_inds] tau_train <- tau_X[train_inds, ] -tau_test <- tau_X[test_inds, ] +tau_test <- tau_X[test_inds, ] ``` ## Python ```{python} sample_inds = np.arange(n) -train_inds, test_inds = train_test_split(sample_inds, test_size=0.5) +train_inds, test_inds = train_test_split(sample_inds, test_size=0.2) X_train = X[train_inds, :] X_test = X[test_inds, :] Z_train = Z[train_inds, :] @@ -145,37 +165,50 @@ tau_test = tau_X[test_inds, :] ## Model Fitting +Fit a multivariate BCF model + ::::{.panel-tabset group="language"} ## R ```{r} -# Note: propensity adjustment is not supported for multivariate treatment in bcf() +general_params <- list( + num_threads = 1, + num_chains = 4, + random_seed = random_seed, + adaptive_coding = FALSE +) bcf_model <- bcf( X_train = X_train, Z_train = Z_train, y_train = y_train, - X_test = X_test, - Z_test = Z_test, + propensity_train = pi_train, num_gfr = 10, - num_mcmc = 100 + num_burnin = 500, + num_mcmc = 100, + general_params = general_params ) ``` ## Python ```{python} +general_params = { + "num_threads": 1, + "num_chains": 4, + "random_seed": random_seed, + "adaptive_coding": False +} bcf_model = BCFModel() bcf_model.sample( X_train=X_train, Z_train=Z_train, y_train=y_train, propensity_train=pi_train, - X_test=X_test, - Z_test=Z_test, - propensity_test=pi_test, num_gfr=10, + num_burnin=500, num_mcmc=100, + general_params=general_params, ) ``` @@ -183,67 +216,82 @@ bcf_model.sample( ## Posterior Summaries -### Outcome +Compare true outcomes to predicted conditional means ::::{.panel-tabset group="language"} ## R 
```{r} -# y_hat_test is (n_test x num_samples) +y_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "y_hat", + type = "mean" +) plot( - rowMeans(bcf_model$y_hat_test), y_test, - xlab = "Average estimated outcome", ylab = "True outcome" + y_hat_test, + y_test, + xlab = "Average estimated outcome", + ylab = "True outcome" ) abline(0, 1, col = "black", lty = 3) -``` - -```{r} -sqrt(mean((rowMeans(bcf_model$y_hat_test) - y_test)^2)) +rmse <- sqrt(mean((y_hat_test - y_test)^2)) +cat("Test-set RMSE: ", rmse, "\n") ``` ## Python ```{python} -forest_preds_y_mcmc = bcf_model.y_hat_test -y_avg_mcmc = np.squeeze(forest_preds_y_mcmc).mean(axis=1, keepdims=True) -y_df_mcmc = pd.DataFrame( - np.concatenate((np.expand_dims(y_test, 1), y_avg_mcmc), axis=1), - columns=["True outcome", "Average estimated outcome"], +y_hat_test = bcf_model.predict( + X=X_test, Z=Z_test, propensity=pi_test, terms="y_hat", type="mean" ) -sns.scatterplot(data=y_df_mcmc, x="Average estimated outcome", y="True outcome") -plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +lo, hi = min(y_hat_test.min(), y_test.min()), max(y_hat_test.max(), y_test.max()) +plt.scatter(y_hat_test, y_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("Predicted") +plt.ylabel("Actual") +plt.title("Outcome") plt.show() -``` - -```{python} -np.sqrt(np.mean(np.power(y_avg_mcmc - y_test, 2))) +rmse = np.sqrt(np.mean(np.power(y_hat_test - y_test, 2))) +print(f"Test-set RMSE: {rmse:.2f}") ``` :::: -### Treatment Effects +Compare true versus estimated treatment effects for each treatment entry ::::{.panel-tabset group="language"} ## R ```{r} -# tau_hat_test is (n_test x p_Z x num_samples) for multivariate treatment -tau_avg_1 <- rowMeans(bcf_model$tau_hat_test[, 1, ]) +tau_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "cate", + type = "mean" +) plot( - tau_test[, 1], 
tau_avg_1, - xlab = "True tau", ylab = "Average estimated tau", + tau_test[, 1], + tau_hat_test[, 1], + xlab = "True tau", + ylab = "Average estimated tau", main = "Treatment 1" ) abline(0, 1, col = "black", lty = 3) ``` ```{r} -tau_avg_2 <- rowMeans(bcf_model$tau_hat_test[, 2, ]) plot( - tau_test[, 2], tau_avg_2, - xlab = "True tau", ylab = "Average estimated tau", + tau_test[, 2], + tau_hat_test[, 2], + xlab = "True tau", + ylab = "Average estimated tau", main = "Treatment 2" ) abline(0, 1, col = "black", lty = 3) @@ -252,52 +300,62 @@ abline(0, 1, col = "black", lty = 3) ## Python ```{python} +tau_hat_test = bcf_model.predict( + X=X_test, Z=Z_test, propensity=pi_test, terms="cate", type="mean" +) treatment_idx = 0 -forest_preds_tau_mcmc = np.squeeze(bcf_model.tau_hat_test[:, :, treatment_idx]) -tau_avg_mcmc = np.squeeze(forest_preds_tau_mcmc).mean(axis=1, keepdims=True) -tau_df_mcmc = pd.DataFrame( - np.concatenate( - (np.expand_dims(tau_test[:, treatment_idx], 1), tau_avg_mcmc), axis=1 - ), - columns=["True tau", "Average estimated tau"], +lo, hi = ( + min((tau_hat_test[:, treatment_idx]).min(), (tau_test[:, treatment_idx]).min()), + max((tau_hat_test[:, treatment_idx]).max(), (tau_test[:, treatment_idx]).max()), ) -sns.scatterplot(data=tau_df_mcmc, x="True tau", y="Average estimated tau") -plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.scatter(tau_test[:, treatment_idx], tau_hat_test[:, treatment_idx], alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("True tau") +plt.ylabel("Average estimated tau") +plt.title(f"Treatment {treatment_idx + 1}") plt.show() ``` ```{python} treatment_idx = 1 -forest_preds_tau_mcmc = np.squeeze(bcf_model.tau_hat_test[:, :, treatment_idx]) -tau_avg_mcmc = np.squeeze(forest_preds_tau_mcmc).mean(axis=1, keepdims=True) -tau_df_mcmc = pd.DataFrame( - np.concatenate( - (np.expand_dims(tau_test[:, treatment_idx], 1), tau_avg_mcmc), axis=1 - ), - columns=["True tau", 
"Average estimated tau"], +lo, hi = ( + min((tau_hat_test[:, treatment_idx]).min(), (tau_test[:, treatment_idx]).min()), + max((tau_hat_test[:, treatment_idx]).max(), (tau_test[:, treatment_idx]).max()), ) -sns.scatterplot(data=tau_df_mcmc, x="True tau", y="Average estimated tau") -plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.scatter(tau_test[:, treatment_idx], tau_hat_test[:, treatment_idx], alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("True tau") +plt.ylabel("Average estimated tau") +plt.title(f"Treatment {treatment_idx + 1}") plt.show() ``` :::: -### Treatment Term +Now compare the true versus estimated treatment terms of the model (i.e. $t_i = \sum_j(\tau_{i,j}(X) * Z_{i,j})$ where $i$ indexes observations and $j$ indexes treatments) ::::{.panel-tabset group="language"} ## R ```{r} -# Compute sum_j(tau_hat(X)_j * Z_j) per observation per sample -treatment_term_mcmc <- apply(bcf_model$tau_hat_test, 3, function(tau_s) { +tau_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "cate", + type = "posterior" +) +treatment_term_mcmc <- apply(tau_hat_test, 3, function(tau_s) { rowSums(tau_s * Z_test) }) true_treatment_term <- rowSums(tau_test * Z_test) plot( - true_treatment_term, rowMeans(treatment_term_mcmc), - xlab = "True treatment term", ylab = "Average estimated treatment term" + true_treatment_term, + rowMeans(treatment_term_mcmc), + xlab = "True treatment term", + ylab = "Average estimated treatment term" ) abline(0, 1, col = "black", lty = 3) ``` @@ -305,38 +363,51 @@ abline(0, 1, col = "black", lty = 3) ## Python ```{python} +tau_hat_test = bcf_model.predict( + X=X_test, Z=Z_test, propensity=pi_test, terms="cate", type="posterior" +) treatment_term_mcmc_test = np.multiply( - np.atleast_3d(Z_test).swapaxes(1, 2), bcf_model.tau_hat_test + np.atleast_3d(Z_test).swapaxes(1, 2), tau_hat_test ).sum(axis=2) treatment_term_test = 
np.multiply(tau_test, Z_test).sum(axis=1) -treatment_term_mcmc_avg = np.squeeze(treatment_term_mcmc_test).mean( +treatment_term_hat_test = np.squeeze(treatment_term_mcmc_test).mean( axis=1, keepdims=True ) -mu_df_mcmc = pd.DataFrame( - np.concatenate( - (np.expand_dims(treatment_term_test, 1), treatment_term_mcmc_avg), axis=1 - ), - columns=["True treatment term", "Average estimated treatment term"], +lo, hi = ( + min((treatment_term_hat_test).min(), (treatment_term_test).min()), + max((treatment_term_hat_test).max(), (treatment_term_test).max()), ) -sns.scatterplot( - data=mu_df_mcmc, x="True treatment term", y="Average estimated treatment term" -) -plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +plt.scatter(treatment_term_test, treatment_term_hat_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("True value") +plt.ylabel("Average estimated value") +plt.title("Treatment Term") plt.show() ``` :::: -### Prognostic Function +Compare true and predicted prognostic function values ::::{.panel-tabset group="language"} ## R ```{r} +mu_hat_test <- predict( + bcf_model, + X = X_test, + Z = Z_test, + propensity = pi_test, + terms = "prognostic_function", + type = "mean" +) plot( - mu_test, rowMeans(bcf_model$mu_hat_test), - xlab = "True mu", ylab = "Average estimated mu" + mu_test, + mu_hat_test, + xlab = "True value", + ylab = "Average estimated value", + main = "Prognostic Function" ) abline(0, 1, col = "black", lty = 3) ``` @@ -344,48 +415,48 @@ abline(0, 1, col = "black", lty = 3) ## Python ```{python} -forest_preds_mu_mcmc = bcf_model.mu_hat_test -mu_avg_mcmc = np.squeeze(forest_preds_mu_mcmc).mean(axis=1, keepdims=True) -mu_df_mcmc = pd.DataFrame( - np.concatenate((np.expand_dims(mu_test, 1), mu_avg_mcmc), axis=1), - columns=["True mu", "Average estimated mu"], +mu_hat_test = bcf_model.predict( + X=X_test, Z=Z_test, propensity=pi_test, terms="prognostic_function", type="mean" ) 
-sns.scatterplot(data=mu_df_mcmc, x="True mu", y="Average estimated mu") -plt.axline((0, 0), slope=1, color="black", linestyle=(0, (3, 3))) +lo, hi = ( + min((mu_hat_test).min(), (mu_test).min()), + max((mu_hat_test).max(), (mu_test).max()), +) +plt.scatter(mu_hat_test, mu_test, alpha=0.5) +plt.plot([lo, hi], [lo, hi], color="red", linestyle="dashed", linewidth=2) +plt.xlabel("True value") +plt.ylabel("Average estimated value") +plt.title("Prognostic Function") plt.show() ``` :::: -### Global Error Variance +Finally, we inspect the traceplot of the global error variance, $\sigma^2$ ::::{.panel-tabset group="language"} ## R ```{r} +sigma2_global_samples <- extractParameter(bcf_model, "sigma2_global") plot( - bcf_model$sigma2_global_samples, - xlab = "Sample", ylab = expression(sigma^2) + sigma2_global_samples, + xlab = "Sample", + ylab = expression(sigma^2) ) +abline(h = 1, lty = 3, lwd = 3, col = "blue") ``` ## Python ```{python} -sigma_df_mcmc = pd.DataFrame( - np.concatenate( - ( - np.expand_dims( - np.arange(bcf_model.num_samples - bcf_model.num_gfr), axis=1 - ), - np.expand_dims(bcf_model.global_var_samples[bcf_model.num_gfr:], axis=1), - ), - axis=1, - ), - columns=["Sample", "Sigma"], -) -sns.scatterplot(data=sigma_df_mcmc, x="Sample", y="Sigma") +global_var_samples = bcf_model.extract_parameter("sigma2_global") +plt.plot(global_var_samples) +plt.axhline(1, color="blue", linestyle="dashed", linewidth=2) +plt.xlabel("Sample") +plt.ylabel(r"$\sigma^2$") +plt.title("Global variance parameter") plt.show() ``` diff --git a/vignettes/ordinal-outcome.qmd b/vignettes/ordinal-outcome.qmd index 725a8f19f..b39bd59f7 100644 --- a/vignettes/ordinal-outcome.qmd +++ b/vignettes/ordinal-outcome.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git 
a/vignettes/prior-calibration.qmd b/vignettes/prior-calibration.qmd index 09a4ea0e4..a518169b4 100644 --- a/vignettes/prior-calibration.qmd +++ b/vignettes/prior-calibration.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git a/vignettes/rdd.qmd b/vignettes/rdd.qmd index 1a11604c8..44253e1bc 100644 --- a/vignettes/rdd.qmd +++ b/vignettes/rdd.qmd @@ -26,7 +26,7 @@ $$ reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git a/vignettes/serialization.qmd b/vignettes/serialization.qmd index e0d7bcd59..5ec5a58e2 100644 --- a/vignettes/serialization.qmd +++ b/vignettes/serialization.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git a/vignettes/sklearn.qmd b/vignettes/sklearn.qmd index 839cbeab1..fc40e8075 100644 --- a/vignettes/sklearn.qmd +++ b/vignettes/sklearn.qmd @@ -9,7 +9,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git a/vignettes/summary-plotting.qmd b/vignettes/summary-plotting.qmd index 516ed1c89..a2b9f6381 100644 --- a/vignettes/summary-plotting.qmd +++ b/vignettes/summary-plotting.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", 
"python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) @@ -117,7 +117,10 @@ Now we fit a simple BART model to the data. num_gfr <- 10 num_burnin <- 0 num_mcmc <- 1000 -general_params <- list(num_chains = 3) +general_params <- list( + num_threads = 1, + num_chains = 3 +) bart_model <- stochtree::bart( X_train = X, y_train = y, @@ -140,7 +143,10 @@ bart_model.sample( num_gfr=10, num_burnin=0, num_mcmc=1000, - general_params={"num_threads": 1, "num_chains": 3}, + general_params={ + "num_threads": 1, + "num_chains": 3 + }, ) ``` @@ -230,7 +236,7 @@ plot( ```{python} y_hat_train_samples = bart_model.extract_parameter("y_hat_train") -obs_index = 0 # 0-indexed (R uses 1) +obs_index = 0 fig, ax = plt.subplots() ax.plot(y_hat_train_samples[obs_index, :]) ax.set_title(f"In-Sample Predictions Traceplot, Observation {obs_index}") @@ -271,14 +277,18 @@ y = mu_X + tau_X * Z + epsilon ## Python ```{python} +# Generate covariates and treatment n = 1000 p_X = 5 X = rng.uniform(size=(n, p_X)) -# R uses X[,1], X[,3], X[,2] (1-indexed) = Python X[:,0], X[:,2], X[:,1] pi_X = 0.25 + 0.5 * X[:, 0] Z = rng.binomial(1, pi_X, n).astype(float) + +# Define the outcome mean functions (prognostic and treatment effects) mu_X = pi_X * 5 + 2 * X[:, 2] tau_X = X[:, 1] * 2 - 1 + +# Generate outcome epsilon = rng.standard_normal(n) y = mu_X + tau_X * Z + epsilon ``` @@ -295,7 +305,11 @@ Now we fit a simple BCF model to the data num_gfr <- 10 num_burnin <- 0 num_mcmc <- 1000 -general_params <- list(num_chains = 3) +general_params <- list( + num_threads = 1, + num_chains = 3, + adaptive_coding = TRUE +) bcf_model <- stochtree::bcf( X_train = X, y_train = y, @@ -319,7 +333,11 @@ bcf_model.sample( num_gfr=10, num_burnin=0, num_mcmc=1000, - general_params={"num_threads": 1, "num_chains": 3}, + general_params={ + "num_threads": 1, + "num_chains": 3, + "adaptive_coding": True + }, ) ``` diff --git a/vignettes/tree-inspection.qmd 
b/vignettes/tree-inspection.qmd index e7af380d8..94d9098b6 100644 --- a/vignettes/tree-inspection.qmd +++ b/vignettes/tree-inspection.qmd @@ -10,7 +10,7 @@ execute: reticulate::use_python( Sys.getenv( "RETICULATE_PYTHON", - unset = file.path(here::here(), ".venv", "bin", "python") + unset = file.path(rprojroot::find_root(rprojroot::has_file(".here")), ".venv", "bin", "python") ), required = TRUE ) diff --git a/vignettes/vignettes.bib b/vignettes/vignettes.bib index afe346880..3de94125f 100644 --- a/vignettes/vignettes.bib +++ b/vignettes/vignettes.bib @@ -1,3 +1,11 @@ +@book{gelman2013bayesian, + title={Bayesian Data Analysis}, + edition={Third}, + author={Gelman, Andrew and Carlin, John B and Stern, Hal S and Dunson, David B and Vehtari, Aki and Rubin, Donald B}, + year={2013}, + publisher={Chapman and Hall/CRC} +} + @article{friedman1991multivariate, title={Multivariate adaptive regression splines}, author={Friedman, Jerome H}, From 2900a2f517be59dd156aedbe62132a983c06e7ae Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Thu, 26 Mar 2026 02:30:10 -0500 Subject: [PATCH 6/8] Add reticulate to R dependencies in CI Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/docs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index f4fc96d22..5d0cf2a90 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -76,7 +76,7 @@ jobs: uses: r-lib/actions/setup-r-dependencies@v2 with: working-directory: 'stochtree_repo' - extra-packages: any::latex2exp, any::ggplot2, any::decor, any::pkgdown + extra-packages: any::latex2exp, any::ggplot2, any::decor, any::pkgdown, any::reticulate needs: website - name: Build R doc site From 5ae6e0b2607d1925a845216c3240df2f46d0590a Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Thu, 26 Mar 2026 02:34:31 -0500 Subject: [PATCH 7/8] Add all vignette R package dependencies to CI Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/docs.yml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 5d0cf2a90..5f3dc0c11 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -76,7 +76,7 @@ jobs: uses: r-lib/actions/setup-r-dependencies@v2 with: working-directory: 'stochtree_repo' - extra-packages: any::latex2exp, any::ggplot2, any::decor, any::pkgdown, any::reticulate + extra-packages: any::latex2exp, any::ggplot2, any::decor, any::pkgdown, any::reticulate, any::bayesplot, any::coda, any::doParallel, any::foreach, any::mvtnorm, any::rpart, any::rpart.plot, any::tgp, any::rprojroot needs: website - name: Build R doc site From dcd6caba26c1ce4c8ede829a60a5883ff50a776c Mon Sep 17 00:00:00 2001 From: Drew Herren Date: Thu, 26 Mar 2026 02:42:30 -0500 Subject: [PATCH 8/8] Explicitly install stochtree R package before vignette rendering Co-Authored-By: Claude Sonnet 4.6 --- .github/workflows/docs.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 5f3dc0c11..04a0e545a 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -87,10 +87,13 @@ jobs: mkdir -p docs/R_docs/pkgdown Rscript -e 'pkgdown::build_site_github_pages("stochtree_repo/stochtree_cran", dest_dir = "../../docs/R_docs/pkgdown", install = TRUE)' + - name: Install stochtree R package for vignettes + run: Rscript -e 'install.packages("stochtree_repo/stochtree_cran", repos = NULL, type = "source")' + - name: Clean up the temporary stochtree_cran directory created run: | cd stochtree_repo - Rscript cran-cleanup.R + Rscript cran-cleanup.R cd .. - name: Install Quarto